/** Annotation remove event */ public void annotationRemoved(AnnotationSetEvent ase) { if (!disableListener && ase.getSourceDocument() == this) { AnnotationSet as = (AnnotationSet) ase.getSource(); Annotation annot = ase.getAnnotation(); FeatureMap features = Factory.newFeatureMap(); features.putAll(annot.getFeatures()); boolean defaultAS = as.getName() == null; for (String docID : combinedDocumentIds) { Document aDoc = compoundDocument.getDocument(docID); // find out the details which refer to the deleted annotation OffsetDetails od = getOffsetDetails(docID, as.getName(), annot); if (od == null) continue; if (defaultAS) { aDoc.getAnnotations().remove(od.getOriginalAnnotation()); } else { aDoc.getAnnotations(as.getName()).remove(od.getOriginalAnnotation()); } removeOffsetDetails(docID, od); break; } } }
/** Test the default tokeniser */ public void testHashGazetteer() throws Exception { // get a document Document doc = Factory.newDocument(new URL(TestDocument.getTestServerName() + "tests/doc0.html")); System.out.println(doc.getFeatures().get("gate.SourceURL")); // create a default gazetteer FeatureMap params = Factory.newFeatureMap(); HashGazetteer gaz = (HashGazetteer) Factory.createResource("com.ontotext.gate.gazetteer.HashGazetteer", params); // runtime stuff gaz.setDocument(doc); gaz.setAnnotationSetName(GAZ_AS); gaz.execute(); assertTrue( "the Annotation set resulting of the execution of the OntoText " + "Natural Gazetteer is empty.", !doc.getAnnotations(GAZ_AS).isEmpty()); // check whether the annotations are as expected assertEquals("wrong number of lookup annotations found", 76, doc.getAnnotations(GAZ_AS).size()); } // testHashGazetteer();
public void tokenize() { AnnotationSet tokenizationAs = gateDocument.getAnnotations("Tokenization"); AnnotationSet defaultAs = gateDocument.getAnnotations(""); for (Iterator<Annotation> it = tokenizationAs.iterator(); it.hasNext(); ) { Annotation currentTokenAnnotation = it.next(); FeatureMap tokenFeaturesMap = currentTokenAnnotation.getFeatures(); FeatureMap curFeaturesMap = Factory.newFeatureMap(); if ("Token".compareToIgnoreCase(currentTokenAnnotation.getType()) == 0) { curFeaturesMap.put("string", tokenFeaturesMap.get("string")); curFeaturesMap.put("root", tokenFeaturesMap.get("lemma")); curFeaturesMap.put("category", tokenFeaturesMap.get("POS")); // Add the new Token to the Annotation Set defaultAs.add( currentTokenAnnotation.getStartNode(), currentTokenAnnotation.getEndNode(), currentTokenAnnotation.getType(), curFeaturesMap); } } gateDocument.removeAnnotationSet("Tokenization"); }
/**
 * Runs the configured controller over a single document and collects the
 * values of the "sym" feature from all "StockSymbols" annotations.
 *
 * @param str the document content to process
 * @return the set of stock symbols found (may be empty)
 * @throws Exception if corpus/document creation or pipeline execution fails
 */
@SuppressWarnings("unchecked")
public Set<String> processDoc(String str) throws Exception {
  Set<String> toReturn = new HashSet<String>();
  Corpus c = null;
  Document aDoc = null;
  try {
    c = Factory.newCorpus("sample");
    aDoc = Factory.newDocument(str);
    c.add(aDoc);
    controller.setCorpus(c);
    controller.execute();
    AnnotationSet aSet = aDoc.getAnnotations("StockSymbols");
    for (Annotation annot : aSet) {
      toReturn.add((String) annot.getFeatures().get("sym"));
    }
  } finally {
    // always release GATE resources, even when execution fails
    if (aDoc != null) {
      Factory.deleteResource(aDoc);
    }
    if (c != null) {
      Factory.deleteResource(c);
    }
  }
  return toReturn;
}
/**
 * Annotation added event: mirrors a newly added annotation on this compound
 * document into the member document whose content covers its span.
 *
 * <p>The annotation's offsets are translated to the member document's
 * coordinate space; the first member document for which both offsets map
 * successfully receives a copy of the annotation (same id, type and
 * features), and an OffsetDetails record linking the two is stored.
 */
public void annotationAdded(AnnotationSetEvent ase) {
  // ignore events we triggered ourselves or that come from other documents
  if (!disableListener && ase.getSourceDocument() == this) {
    AnnotationSet as = (AnnotationSet) ase.getSource();
    Annotation annot = ase.getAnnotation();
    // track feature changes on the new annotation as well
    annot.addAnnotationListener(this);
    // copy the features so the mirrored annotation has its own map
    FeatureMap features = Factory.newFeatureMap();
    features.putAll(annot.getFeatures());
    // null set name means the default (unnamed) annotation set
    boolean defaultAS = as.getName() == null;
    for (String docID : combinedDocumentIds) {
      Document aDoc = compoundDocument.getDocument(docID);
      // translate compound-document offsets into this member document;
      // -1 means the offset does not fall inside this member
      long stOffset = getOffsetInSrcDocument(docID, annot.getStartNode().getOffset().longValue());
      if (stOffset == -1) continue;
      long enOffset = getOffsetInSrcDocument(docID, annot.getEndNode().getOffset().longValue());
      if (enOffset == -1) continue;
      Annotation originalAnnot = null;
      try {
        // reuse the same annotation id so the two copies stay linked
        Integer id = annot.getId();
        if (defaultAS) {
          aDoc.getAnnotations()
              .add(id, new Long(stOffset), new Long(enOffset), annot.getType(), features);
          // re-fetch by id to obtain the annotation instance owned by aDoc
          originalAnnot = aDoc.getAnnotations().get(id);
        } else {
          aDoc.getAnnotations(as.getName())
              .add(id, new Long(stOffset), new Long(enOffset), annot.getType(), features);
          originalAnnot = aDoc.getAnnotations(as.getName()).get(id);
        }
      } catch (InvalidOffsetException ioe) {
        // should not happen once both offsets mapped; fail loudly with context
        System.out.println(aDoc.getName() + "=" + stOffset + "=" + enOffset);
        throw new GateRuntimeException(ioe);
      }
      // record the offset mapping: "old" = member-doc space, "new" = compound space
      OffsetDetails od = new OffsetDetails();
      od.setOldStartOffset(stOffset);
      od.setOldEndOffset(enOffset);
      od.setNewStartOffset(annot.getStartNode().getOffset().longValue());
      od.setNewEndOffset(annot.getEndNode().getOffset().longValue());
      od.setOriginalAnnotation(originalAnnot);
      od.setNewAnnotation(annot);
      addNewOffsetDetails(docID, od);
      // an annotation is mirrored into exactly one member document
      break;
    }
  }
}
public void splitter() { AnnotationSet sDetectionAS = gateDocument.getAnnotations("SentenceDetection"); AnnotationSet defaultAs = gateDocument.getAnnotations(""); for (Iterator<Annotation> it = sDetectionAS.iterator(); it.hasNext(); ) { Annotation currentSentenceAnnotation = it.next(); // Add the Sentence to the Annotation Set defaultAs.add( currentSentenceAnnotation.getStartNode(), currentSentenceAnnotation.getEndNode(), "Sentence", null); } gateDocument.removeAnnotationSet("SentenceDetection"); }
/** * Generation of a GATE document from a Behemoth one * * @param key URL of the input doc * @param inputDoc * @return * @throws ResourceInstantiationException * @throws InvalidOffsetException * @throws IOException */ public gate.Document generateGATEDoc(BehemothDocument inputDoc) throws ResourceInstantiationException, InvalidOffsetException, IOException { gate.Document gatedocument = null; // if no text is available (e.g. Tika has not extracted it) // let GATE do the parsing itself from the binary content if (inputDoc.getText() == null) { try { gatedocument = generateGATEDocFromLocalDump(inputDoc); // transfer the text from GATE to Behemoth String textContent = gatedocument.getContent().toString(); inputDoc.setText(textContent); return gatedocument; } catch (Exception e) { LOG.error("Can't generate GATE doc from byte dump", e); } } // if the input document does not have any text -> create a doc with an // empty text String text = inputDoc.getText(); if (inputDoc.getText() == null) text = ""; else text = inputDoc.getText(); gatedocument = Factory.newDocument(text); // then the metadata as document features FeatureMap docFeatures = gatedocument.getFeatures(); String docUrl = inputDoc.getUrl(); if (docUrl != null) docFeatures.put("gate.SourceURL", docUrl); if (inputDoc.getMetadata() != null) { Iterator<Entry<Writable, Writable>> iter = inputDoc.getMetadata().entrySet().iterator(); while (iter.hasNext()) { Entry<Writable, Writable> entry = iter.next(); String skey = entry.getKey().toString().trim(); String svalue = null; if (entry.getValue() != null) svalue = entry.getValue().toString().trim(); docFeatures.put(skey, svalue); } } // finally the annotations as original markups // TODO change the name of the annotation set via config AnnotationSet outputAS = gatedocument.getAnnotations("Original markups"); for (Annotation annot : inputDoc.getAnnotations()) { // add to outputAS as a GATE annotation FeatureMap features = Factory.newFeatureMap(); 
features.putAll(annot.getFeatures()); outputAS.add(annot.getStart(), annot.getEnd(), annot.getType(), features); } return gatedocument; }
/**
 * Annotates the given document. When no containing type is configured the
 * whole document span is processed; otherwise only the spans covered by
 * annotations of {@code containingType} from the input annotation set.
 *
 * <p>NOTE(review): the original mixed the {@code document} field with the
 * {@code theDocument} parameter; this version consistently uses the
 * parameter, which is the value that was null-checked.
 *
 * @param theDocument the document to process
 * @throws ExecutionException if no document is supplied
 */
protected void doExecute(Document theDocument) throws ExecutionException {
  interrupted = false;
  if (theDocument == null) {
    throw new ExecutionException("No document to process!");
  }
  AnnotationSet outputAS = theDocument.getAnnotations(getOutputAnnotationSet());
  if (containingType == null || containingType.isEmpty()) {
    // no containing type: annotate the full document span
    annotateText(theDocument, outputAS, 0, theDocument.getContent().size());
  } else {
    // empty/null input AS name means the default annotation set
    AnnotationSet inputAS;
    if (inputASName == null || inputASName.isEmpty()) {
      inputAS = theDocument.getAnnotations();
    } else {
      inputAS = theDocument.getAnnotations(inputASName);
    }
    // annotate each containing annotation's span separately
    for (Annotation containingAnn : inputAS.get(containingType)) {
      annotateText(
          theDocument, outputAS, gate.Utils.start(containingAnn), gate.Utils.end(containingAnn));
    }
  }
}
/**
 * Computes an orthography value for every token annotation in the configured
 * set and stores it under the "orth" feature (tokens with no computable value
 * are left untouched).
 *
 * @throws ExecutionException wrapping any failure during processing
 */
@Override
public void execute() throws ExecutionException {
  Document doc = getDocument();
  AnnotationSet annotationSet = doc.getAnnotations(getAnnotationSetName());
  AnnotationSet tokens = annotationSet.get(getTokenAnnotationTypeName());
  try {
    for (Annotation token : tokens) {
      String text = Utils.stringFor(doc, token);
      String orth = getOrthographyValue(text);
      if (orth != null) {
        token.getFeatures().put("orth", orth);
      }
    }
  } catch (Exception e) {
    // preserve the cause for the caller
    throw new ExecutionException(e);
  }
}
/** * Checks two documents for equality. * * @param doc1 a document * @param doc2 another document * @return a boolean. */ public static boolean documentsEqual(Document doc1, Document doc2) { message = ""; if (doc1 == null ^ doc2 == null) { message = "Documents not equal: null<>non-null!"; return false; } if (doc1 == null) return true; if (!check(doc1.getContent(), doc2.getContent())) { message = "Document contents different!"; return false; } if (!check(doc1.getAnnotations(), doc2.getAnnotations())) { message = "Documents default AS not equal!"; return false; } if (doc1 instanceof TextualDocument) { if (doc2 instanceof TextualDocument) { if (!check( ((TextualDocument) doc1).getEncoding(), ((TextualDocument) doc2).getEncoding())) { message = "Textual documents with different encodings!"; return false; } } else { message = "Documents not equal: textual<>non-textual!"; return false; } } if (!check(doc1.getFeatures(), doc2.getFeatures())) { message = "Documents features not equal!"; return false; } // needs friend declaration :( // if(!markupAware.equals(doc.markupAware)) return false; if (!check(doc1.getNamedAnnotationSets(), doc2.getNamedAnnotationSets())) { message = "Documents named annots not equal!"; return false; } // if(doc1 instanceof DocumentImpl){ // if(doc2 instanceof DocumentImpl){ // if(! check(((DocumentImpl)doc1).getNextNodeId(), // ((DocumentImpl)doc2).getNextNodeId())){ // message = "Documents next nodeID not equal!"; // return false; // } // if(! 
check(((DocumentImpl)doc1).getNextAnnotationId(), // ((DocumentImpl)doc2).getNextAnnotationId())){ // message = "Documents next annotationIDs not equal!"; // return false; // } // }else{ // message = "Documents not equal: DocumentImpl<>non-DocumentImpl!"; // return false; // } // } if (!check(doc1.getSourceUrl(), doc2.getSourceUrl())) { message = "Documents sourceURLs not equal!"; return false; } if (!(check(doc1.getSourceUrlStartOffset(), doc2.getSourceUrlStartOffset()) && check(doc1.getSourceUrlEndOffset(), doc2.getSourceUrlEndOffset()))) { message = "Documents sourceURLOffsets not equal!"; return false; } return true; }
/**
 * Document event: when one of this document's annotation sets is removed,
 * stop listening to it.
 */
public void annotationSetRemoved(DocumentEvent de) {
  Document source = (Document) de.getSource();
  if (source == this) {
    // detach ourselves from the set that has just been removed
    source.getAnnotations(de.getAnnotationSetName()).removeAnnotationSetListener(this);
  }
}
/** * The main entry point. First we parse the command line options (see usage() method for details), * then we take all remaining command line parameters to be file names to process. Each file is * loaded, processed using the application and the results written to the output file * (inputFile.out.xml). */ public static void main(String[] args) throws Exception { parseCommandLine(args); // initialise GATE - this must be done before calling any GATE APIs Gate.init(); // load the saved application CorpusController application = (CorpusController) PersistenceManager.loadObjectFromFile(gappFile); // Create a Corpus to use. We recycle the same Corpus object for each // iteration. The string parameter to newCorpus() is simply the // GATE-internal name to use for the corpus. It has no particular // significance. ArrayList<String> files = getFilesFromDir(inputDir); gate.Corpus corpus = createCorpus(files); // Corpus corpus = Factory.newCorpus("BatchProcessApp Corpus"); application.setCorpus(corpus); System.out.println("Processing " + files.size() + " files"); // process the files one by one for (int i = 0; i < files.size(); i++) { // load the document (using the specified encoding if one was given) File docFile = new File(files.get(i)); System.out.print("Processing document " + docFile + " (" + i + ") ..."); Document doc = Factory.newDocument(docFile.toURL(), encoding); // put the document in the corpus corpus.add(doc); // run the application application.execute(); // remove the document from the corpus again corpus.clear(); String docXMLString = null; // if we want to just write out specific annotation types, we must // extract the annotations into a Set if (annotTypesToWrite != null) { // Create a temporary Set to hold the annotations we wish to write out Set annotationsToWrite = new HashSet(); // we only extract annotations from the default (unnamed) AnnotationSet // in this example AnnotationSet defaultAnnots = doc.getAnnotations("Output"); Iterator annotTypesIt = 
annotTypesToWrite.iterator(); while (annotTypesIt.hasNext()) { // extract all the annotations of each requested type and add them to // the temporary set AnnotationSet annotsOfThisType = defaultAnnots.get((String) annotTypesIt.next()); if (annotsOfThisType != null) { annotationsToWrite.addAll(annotsOfThisType); } } // create the XML string using these annotations docXMLString = doc.toXml(annotationsToWrite, true); } // otherwise, just write out the whole document as GateXML else { docXMLString = doc.toXml(); } // Release the document, as it is no longer needed Factory.deleteResource(doc); // output the XML to <inputFile>.out.xml System.out.println("Writing file " + docFile.getName()); String outputFileName = docFile.getName() + ".out.xml"; // File outputFile = new File(docFile.getParentFile(), outputFileName); File outputFile = new File(new File(outputDir).getAbsolutePath(), outputFileName); // Write output files using the same encoding as the original FileOutputStream fos = new FileOutputStream(outputFile); BufferedOutputStream bos = new BufferedOutputStream(fos); OutputStreamWriter out; if (encoding == null) { out = new OutputStreamWriter(bos); } else { out = new OutputStreamWriter(bos, encoding); } out.write(docXMLString); out.close(); System.out.println("done"); } // for each file System.out.println("All done"); } // void main(String[] args)
/**
 * Runs the GATE application over each record in {@code this.recs} and builds
 * one CSV line per record from the resulting annotations, plus the plain text
 * of each record.
 *
 * <p>CSV column order: FirstPerson count, ThirdPerson count, names, ages,
 * costs, height-feet, height-inches, weight, cup, chest, waist, hip,
 * ethnicity, skin color, eye color, hair color, restriction type/ethnicity/age,
 * phone value/state/city, emails, urls, media. Multiple values within a column
 * are joined with ";" — any "," or ";" inside a value is replaced with a space
 * so the CSV stays well-formed.
 *
 * @return a RetObj holding the CSV lines and the extracted record texts
 * @throws Exception if corpus creation or pipeline execution fails
 */
private RetObj ProcessRecords() throws Exception {
  // Create a Corpus to use. We recycle the same Corpus object for each
  // iteration.
  Corpus corpus = Factory.newCorpus("BatchProcessApp Corpus");
  this.application.setCorpus(corpus);
  // object for returned data
  List<String> processedlines = new ArrayList<String>();
  List<String> processedText = new ArrayList<String>();
  for (int record_num = 0; record_num < this.recs.size(); ++record_num) {
    /*if( record_num % Math.ceil(((double) this.recs.size())/10.0) == 0) System.out.println("Thread " + this.threadID + ": "+ ((int) ((double)record_num)/((double) this.recs.size())*100.0 ) +"% complete."); */
    // first, split title from body and get embedded age in title..
    // Records appear to be "<title>-<age>,> <body>" — TODO confirm format.
    String title_age = "-1";
    String sep = "..THIS IS MY SEPARATION STRING..";
    String title = "";
    String body = this.recs.get(record_num);
    Boolean trimmed = false;
    int age_end = body.indexOf(",> ");
    if (age_end >= 0 && age_end < body.length()) {
      // look backwards from ",> " for the "-" that precedes the age
      int age_start = body.lastIndexOf("-", age_end);
      if (age_start >= 0 && age_start < age_end) {
        title_age = body.substring(age_start + 1, age_end).trim();
        if (!isInteger(title_age)) title_age = "-1";
        else {
          // valid age found: cut it out and rejoin title + body with sep
          title = body.substring(0, age_start);
          body = body.substring(age_end + 2, body.length());
          body = title + sep + body;
          trimmed = true;
        }
      }
      if (!trimmed) {
        // no parsable age: split at the ",> " marker only
        title = body.substring(0, age_end);
        body = body.substring(age_end + 2, body.length());
        body = title + sep + body;
        trimmed = true;
      }
    }
    // --------------------
    // parse the record HTML; links/media/imports are harvested separately below
    org.jsoup.nodes.Document htmldoc =
        Jsoup.parseBodyFragment(body.replaceAll("COMMA_GOES_HERE", ","));
    Elements links = htmldoc.select("a[href]");
    Elements media = htmldoc.select("[src]");
    Elements imports = htmldoc.select("link[href]");
    processedText.add(htmldoc.text().replace(sep, " "));
    Document doc = Factory.newDocument(htmldoc.text());
    // put the document in the corpus
    corpus.add(doc);
    // run the application
    this.application.execute();
    // remove the document from the corpus again
    corpus.clear();
    // extract annotations from the default annotation set
    String line = "";
    AnnotationSet Annots = doc.getAnnotations("");
    // columns 1-2: counts of FirstPerson / ThirdPerson annotations
    Integer FirstPersonCount = 0, ThirdPersonCount = 0;
    AnnotationSet FirstPerson = Annots.get("FirstPerson");
    if (FirstPerson != null) FirstPersonCount = FirstPerson.size();
    AnnotationSet ThirdPerson = Annots.get("ThirdPerson");
    if (ThirdPerson != null) ThirdPersonCount = ThirdPerson.size();
    line += FirstPersonCount.toString() + "," + ThirdPersonCount.toString() + ",";
    // column: names ("name" feature), ";"-joined
    AnnotationSet Names = Annots.get("Name");
    if (Names == null || Names.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = Names.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("name");
        if (Feat != null) line += Feat.toString();
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // column: ages — title-embedded age first, then "age" features
    AnnotationSet Age = Annots.get("Age");
    if (Age == null || Age.size() < 1) line += title_age + ",";
    else {
      Iterator<Annotation> Iter = Age.inDocumentOrder().iterator();
      line += title_age + ";";
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("age");
        if (Feat != null) line += Feat.toString();
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // column: costs as value/target_value/target_type triples
    AnnotationSet Cost = Annots.get("Cost");
    if (Cost == null || Cost.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = Cost.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("value");
        if (Feat != null) line += Feat.toString();
        else line += "none";
        line += "/";
        Feat = Ann.getFeatures().get("target_value");
        if (Feat != null) line += Feat.toString();
        else line += "none";
        line += "/";
        Feat = Ann.getFeatures().get("target_type");
        if (Feat != null) line += Feat.toString();
        else line += "none";
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // columns: height split into parallel feet / inches lists
    AnnotationSet height = Annots.get("height");
    if (height == null || height.size() < 1) line += ",,";
    else {
      String ft = "";
      String inch = "";
      Iterator<Annotation> Iter = height.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("feet");
        if (Feat != null) ft += Feat.toString();
        else ft += "none";
        Feat = Ann.getFeatures().get("inches");
        if (Feat != null) inch += Feat.toString();
        else inch += "none";
        if (Iter.hasNext()) {
          ft += ";";
          inch += ";";
        }
      }
      line += ft + "," + inch + ",";
    }
    // column: weight ("pounds" feature)
    AnnotationSet weight = Annots.get("weight");
    if (weight == null || weight.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = weight.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("pounds");
        if (Feat != null) line += Feat.toString();
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // columns: measurements split into parallel cup/chest/waist/hip lists
    AnnotationSet measurement = Annots.get("measurement");
    if (measurement == null || measurement.size() < 1) line += ",,,,";
    else {
      String cup = "";
      String chest = "";
      String waist = "";
      String hip = "";
      Iterator<Annotation> Iter = measurement.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("cup");
        if (Feat != null) cup += Feat.toString();
        else cup += "none";
        Feat = Ann.getFeatures().get("chest");
        if (Feat != null) chest += Feat.toString();
        else chest += "none";
        Feat = Ann.getFeatures().get("waist");
        if (Feat != null) waist += Feat.toString();
        else waist += "none";
        Feat = Ann.getFeatures().get("hip");
        if (Feat != null) hip += Feat.toString();
        else hip += "none";
        if (Iter.hasNext()) {
          cup += ";";
          chest += ";";
          waist += ";";
          hip += ";";
        }
      }
      line += cup + "," + chest + "," + waist + "," + hip + ",";
    }
    // column: ethnicity (sanitised: lowercase, "," and ";" -> space)
    AnnotationSet Ethnicity = Annots.get("Ethnicity");
    if (Ethnicity == null || Ethnicity.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = Ethnicity.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("ethnicity");
        if (Feat != null)
          line += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // column: skin color (sanitised)
    AnnotationSet SkinColor = Annots.get("SkinColor");
    if (SkinColor == null || SkinColor.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = SkinColor.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("color");
        if (Feat != null)
          line += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // column: eye color (sanitised)
    AnnotationSet EyeColor = Annots.get("EyeColor");
    if (EyeColor == null || EyeColor.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = EyeColor.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("color");
        if (Feat != null)
          line += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // column: hair color (sanitised)
    AnnotationSet HairColor = Annots.get("HairColor");
    if (HairColor == null || HairColor.size() < 1) line += ",";
    else {
      Iterator<Annotation> Iter = HairColor.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("color");
        if (Feat != null)
          line += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        if (Iter.hasNext()) line += ";";
      }
      line += ",";
    }
    // columns: restrictions split into parallel type/ethnicity/age lists
    AnnotationSet Restriction = Annots.get("Restriction");
    if (Restriction == null || Restriction.size() < 1) line += ",,,";
    else {
      String type = "";
      String ethnicity = "";
      String age = "";
      Iterator<Annotation> Iter = Restriction.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("type");
        if (Feat != null) type += Feat.toString();
        else type += "none";
        Feat = Ann.getFeatures().get("ethnicity");
        if (Feat != null)
          ethnicity += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        else ethnicity += "none";
        Feat = Ann.getFeatures().get("age");
        if (Feat != null) age += Feat.toString();
        else age += "none";
        if (Iter.hasNext()) {
          type += ";";
          ethnicity += ";";
          age += ";";
        }
      }
      line += type + "," + ethnicity + "," + age + ",";
    }
    // columns: phone numbers split into parallel value/state/city lists
    AnnotationSet Phone = Annots.get("PhoneNumber");
    if (Phone == null || Phone.size() < 1) line += ",,,";
    else {
      String value = "";
      String state = "";
      String city = "";
      Iterator<Annotation> Iter = Phone.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("value");
        if (Feat != null) value += Feat.toString();
        else value += "none";
        Feat = Ann.getFeatures().get("state");
        if (Feat != null) state += Feat.toString();
        else state += "none";
        Feat = Ann.getFeatures().get("area");
        if (Feat != null)
          city += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ");
        else city += "none";
        if (Iter.hasNext()) {
          value += ";";
          state += ";";
          city += ";";
        }
      }
      line += value + "," + state + "," + city + ",";
    }
    // column: emails — annotation "email" features plus mailto: links
    String Emails = "";
    AnnotationSet Email = Annots.get("Email");
    if (Email == null || Email.size() < 1) Emails = "";
    else {
      Iterator<Annotation> Iter = Email.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("email");
        if (Feat != null)
          Emails += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ") + ";";
      }
    }
    if (links != null) {
      for (Element l : links) {
        String href = l.attr("abs:href");
        if (href == null) continue;
        if (href.length() > 7 && href.substring(0, 7).toLowerCase().equals("mailto:")) {
          Emails += href.substring(7, href.length()).replaceAll(",", " ").replaceAll(";", " ") + ";";
        }
      }
    }
    // strip trailing ";" separator
    if (Emails.length() > 0 && Emails.substring(Emails.length() - 1, Emails.length()).equals(";"))
      Emails = Emails.substring(0, Emails.length() - 1);
    line += Emails + ",";
    // column: urls — annotation "url" features plus non-mailto links and imports
    String Urls = "";
    AnnotationSet Url = Annots.get("Url");
    if (Url == null || Url.size() < 1) Urls = "";
    else {
      Iterator<Annotation> Iter = Url.inDocumentOrder().iterator();
      while (Iter.hasNext()) {
        Annotation Ann = Iter.next();
        Object Feat = Ann.getFeatures().get("url");
        if (Feat != null)
          Urls += Feat.toString().toLowerCase().replaceAll(",", " ").replaceAll(";", " ") + ";";
      }
    }
    if (links != null) {
      for (Element l : links) {
        String href = l.attr("abs:href");
        if (href == null) continue;
        if (href.length() <= 7 || !href.substring(0, 7).toLowerCase().equals("mailto:")) {
          Urls += href.replaceAll(",", " ").replaceAll(";", " ") + ";";
        }
      }
    }
    if (imports != null) {
      for (Element l : imports) {
        String href = l.attr("abs:href");
        if (href == null) continue;
        Urls += href.replaceAll(",", " ").replaceAll(";", " ") + ";";
      }
    }
    // strip trailing ";" separator
    if (Urls.length() > 0 && Urls.substring(Urls.length() - 1, Urls.length()).equals(";"))
      Urls = Urls.substring(0, Urls.length() - 1);
    line += Urls + ",";
    // final column: media srcs
    String Medias = "";
    if (media != null) {
      for (Element l : media) {
        String src = l.attr("abs:src");
        if (src == null) continue;
        Medias += src.replaceAll(",", " ").replaceAll(";", " ") + ";";
      }
    }
    // strip trailing ";" separator
    if (Medias.length() > 0 && Medias.substring(Medias.length() - 1, Medias.length()).equals(";"))
      Medias = Medias.substring(0, Medias.length() - 1);
    line += Medias;
    processedlines.add(line);
    // Release the document, as it is no longer needed
    Factory.deleteResource(doc);
  }
  Factory.deleteResource(corpus);
  RetObj out = new RetObj(processedlines, processedText);
  return out;
}
/**
 * Run from the command-line, with a list of URLs as argument.
 *
 * <p><B>NOTE:</B><br>
 * This code will run with all the documents in memory - if you want to unload each from memory
 * after use, add code to store the corpus in a DataStore.
 */
public static void main(String args[]) throws GateException, IOException {
  // initialise the GATE library
  Out.prln("Initialising GATE...");
  Gate.init();
  Out.prln("...GATE initialised");
  // initialise ANNIE (this may take several minutes)
  StandAloneAnnie annie = new StandAloneAnnie();
  annie.initAnnie();
  // create a GATE corpus and add a document for each command-line
  // argument
  Corpus corpus = Factory.newCorpus("StandAloneAnnie corpus");
  for (int i = 0; i < args.length; i++) {
    URL u = new URL(args[i]);
    FeatureMap params = Factory.newFeatureMap();
    params.put("sourceUrl", u);
    // keep the original content and repositioning info so annotation
    // offsets can later be mapped back onto the raw HTML
    params.put("preserveOriginalContent", new Boolean(true));
    params.put("collectRepositioningInfo", new Boolean(true));
    Out.prln("Creating doc for " + u);
    Document doc = (Document) Factory.createResource("gate.corpora.DocumentImpl", params);
    corpus.add(doc);
  } // for each of args
  // tell the pipeline about the corpus and run it
  annie.setCorpus(corpus);
  annie.execute();
  // for each document, get an XML document with the
  // person and location names added
  Iterator iter = corpus.iterator();
  int count = 0;
  // HTML fragments used to wrap each annotation in the generated output
  String startTagPart_1 = "<span GateID=\"";
  String startTagPart_2 = "\" title=\"";
  String startTagPart_3 = "\" style=\"background:Red;\">";
  String endTag = "</span>";
  while (iter.hasNext()) {
    Document doc = (Document) iter.next();
    AnnotationSet defaultAnnotSet = doc.getAnnotations();
    // only Person and Location annotations are exported
    Set annotTypesRequired = new HashSet();
    annotTypesRequired.add("Person");
    annotTypesRequired.add("Location");
    Set<Annotation> peopleAndPlaces = new HashSet<Annotation>(defaultAnnotSet.get(annotTypesRequired));
    FeatureMap features = doc.getFeatures();
    String originalContent =
        (String) features.get(GateConstants.ORIGINAL_DOCUMENT_CONTENT_FEATURE_NAME);
    RepositioningInfo info =
        (RepositioningInfo) features.get(GateConstants.DOCUMENT_REPOSITIONING_INFO_FEATURE_NAME);
    ++count;
    File file = new File("StANNIE_" + count + ".HTML");
    Out.prln("File name: '" + file.getAbsolutePath() + "'");
    if (originalContent != null && info != null) {
      // best case: we can map annotation offsets back to the original HTML
      Out.prln("OrigContent and reposInfo existing. Generate file...");
      Iterator it = peopleAndPlaces.iterator();
      Annotation currAnnot;
      SortedAnnotationList sortedAnnotations = new SortedAnnotationList();
      while (it.hasNext()) {
        currAnnot = (Annotation) it.next();
        sortedAnnotations.addSortedExclusive(currAnnot);
      } // while
      StringBuffer editableContent = new StringBuffer(originalContent);
      long insertPositionEnd;
      long insertPositionStart;
      // insert anotation tags backward (so earlier offsets stay valid)
      Out.prln("Unsorted annotations count: " + peopleAndPlaces.size());
      Out.prln("Sorted annotations count: " + sortedAnnotations.size());
      for (int i = sortedAnnotations.size() - 1; i >= 0; --i) {
        currAnnot = (Annotation) sortedAnnotations.get(i);
        // translate GATE offsets to positions in the original content;
        // getOriginalPos returns -1 when no mapping exists
        insertPositionStart = currAnnot.getStartNode().getOffset().longValue();
        insertPositionStart = info.getOriginalPos(insertPositionStart);
        insertPositionEnd = currAnnot.getEndNode().getOffset().longValue();
        insertPositionEnd = info.getOriginalPos(insertPositionEnd, true);
        if (insertPositionEnd != -1 && insertPositionStart != -1) {
          // insert end tag first, then the start tag pieces back-to-front
          editableContent.insert((int) insertPositionEnd, endTag);
          editableContent.insert((int) insertPositionStart, startTagPart_3);
          editableContent.insert((int) insertPositionStart, currAnnot.getType());
          editableContent.insert((int) insertPositionStart, startTagPart_2);
          editableContent.insert((int) insertPositionStart, currAnnot.getId().toString());
          editableContent.insert((int) insertPositionStart, startTagPart_1);
        } // if
      } // for
      FileWriter writer = new FileWriter(file);
      writer.write(editableContent.toString());
      writer.close();
    } // if - should generate
    else if (originalContent != null) {
      // no repositioning info: use the GATE offsets directly
      Out.prln("OrigContent existing. Generate file...");
      Iterator it = peopleAndPlaces.iterator();
      Annotation currAnnot;
      SortedAnnotationList sortedAnnotations = new SortedAnnotationList();
      while (it.hasNext()) {
        currAnnot = (Annotation) it.next();
        sortedAnnotations.addSortedExclusive(currAnnot);
      } // while
      StringBuffer editableContent = new StringBuffer(originalContent);
      long insertPositionEnd;
      long insertPositionStart;
      // insert anotation tags backward (so earlier offsets stay valid)
      Out.prln("Unsorted annotations count: " + peopleAndPlaces.size());
      Out.prln("Sorted annotations count: " + sortedAnnotations.size());
      for (int i = sortedAnnotations.size() - 1; i >= 0; --i) {
        currAnnot = (Annotation) sortedAnnotations.get(i);
        insertPositionStart = currAnnot.getStartNode().getOffset().longValue();
        insertPositionEnd = currAnnot.getEndNode().getOffset().longValue();
        if (insertPositionEnd != -1 && insertPositionStart != -1) {
          editableContent.insert((int) insertPositionEnd, endTag);
          editableContent.insert((int) insertPositionStart, startTagPart_3);
          editableContent.insert((int) insertPositionStart, currAnnot.getType());
          editableContent.insert((int) insertPositionStart, startTagPart_2);
          editableContent.insert((int) insertPositionStart, currAnnot.getId().toString());
          editableContent.insert((int) insertPositionStart, startTagPart_1);
        } // if
      } // for
      FileWriter writer = new FileWriter(file);
      writer.write(editableContent.toString());
      writer.close();
    } else {
      // neither original content nor repositioning info: just report
      Out.prln("Content : " + originalContent);
      Out.prln("Repositioning: " + info);
    }
    // also dump the annotations as GATE XML alongside the HTML file
    String xmlDocument = doc.toXml(peopleAndPlaces, false);
    String fileName = new String("StANNIE_toXML_" + count + ".HTML");
    FileWriter writer = new FileWriter(fileName);
    writer.write(xmlDocument);
    writer.close();
  } // for each doc
} // main
// Process an input document with GATE and a Reporter public synchronized BehemothDocument[] process(BehemothDocument inputDoc, Reporter reporter) { if (reporter != null) reporter.setStatus("GATE : " + inputDoc.getUrl().toString()); boolean clearBehemothAnnotations = config.getBoolean("gate.deleteBehemothAnnotations", false); // process the text passed as value with the application // a) create a GATE document based on the text value gate.Document gatedocument = null; try { gatedocument = generateGATEDoc(inputDoc); // add it to the current corpus corpus.add(gatedocument); // get the application and assign the corpus to it this.GATEapplication.setCorpus(corpus); // process it with GATE this.GATEapplication.execute(); AnnotationSet annots = null; if ("".equals(filters.getAnnotationSetName())) annots = gatedocument.getAnnotations(); else annots = gatedocument.getAnnotations(filters.getAnnotationSetName()); // enrich the input doc with the annotations from // the GATE application // transfer the annotations from the GATE document // to the Behemoth one using the filters List<com.digitalpebble.behemoth.Annotation> beheannotations = convertGATEAnnotationsToBehemoth(annots, inputDoc); // sort the annotations before adding them? 
Collections.sort(beheannotations); // clear the existing behemoth annotations if (clearBehemothAnnotations) { inputDoc.getAnnotations().clear(); } inputDoc.getAnnotations().addAll(beheannotations); // add counters about num of annotations added if (reporter != null) for (com.digitalpebble.behemoth.Annotation annot : beheannotations) { reporter.incrCounter("GATE", annot.getType(), 1); } // Add the document features from GATE to Behemoth Set<String> docFeatFilter = this.filters.getDocFeaturesFilter(); MapWritable beheMD = inputDoc.getMetadata(true); if (docFeatFilter.size() > 0) { for (String docFeatName : docFeatFilter) { Object featValue = gatedocument.getFeatures().get(docFeatName); if (featValue != null) { beheMD.put(new Text(docFeatName), new Text(featValue.toString())); } } } if (reporter != null) reporter.incrCounter("GATE", "Document", 1); } catch (Exception e) { LOG.error(inputDoc.getUrl().toString(), e); if (reporter != null) reporter.incrCounter("GATE", "Exceptions", 1); } finally { // remove the document from the corpus again corpus.clear(); // and from memory if (gatedocument != null) Factory.deleteResource(gatedocument); } // currently returns only the input document return new BehemothDocument[] {inputDoc}; }
/**
 * This method annotates paragraphs in a GATE document. The investigated text
 * spans between the start and end offsets, and the paragraph annotations are
 * created in annotSetName. If annotSetName is null then they are created in
 * the default annotation set.
 *
 * <p>Implemented as a small finite-state automaton over the characters of the
 * document content: state 1 = skipping whitespace before a paragraph,
 * state 2 = inside a paragraph, state 3 = one line break seen (a second
 * consecutive break terminates the paragraph).
 *
 * @param aDoc the gate document on which the paragraph detection is
 *     performed. If it is null, or its content is null, the method simply
 *     returns doing nothing.
 * @param startOffset the index in the document content from which the
 *     paragraph detection will start
 * @param endOffset the offset where the detection will end
 * @param annotSetName the name of the set in which paragraph annotations will
 *     be created. The annotation type created will be "paragraph".
 * @throws DocumentFormatException if a paragraph annotation cannot be created
 *     because of invalid offsets
 */
public void annotateParagraphs(Document aDoc, int startOffset, int endOffset, String annotSetName)
    throws DocumentFormatException {
  // Simply return if the document is null or its content is null
  if (aDoc == null || aDoc.getContent() == null) return;
  // Simply return if the start is > than the end
  if (startOffset > endOffset) return;
  // Decide where to put the newly detected annotations
  AnnotationSet annotSet = null;
  if (annotSetName == null) annotSet = aDoc.getAnnotations();
  else annotSet = aDoc.getAnnotations(annotSetName);
  // Extract the document content
  String content = aDoc.getContent().toString();
  // This is the offset marking the start of a para
  int startOffsetPara = startOffset;
  // This marks the end of a para
  int endOffsetPara = endOffset;
  // The initial state of the FSA
  int state = 1;
  // This field marks that a BR entity was read
  // A BR entity can be NL or NL CR, depending on the operating system (UNIX
  // or DOS)
  boolean readBR = false;
  int index = startOffset;
  while (index < endOffset) {
    // Read the current char
    char ch = content.charAt(index);
    // Test if a BR entity was read
    if (ch == '\n') {
      readBR = true;
      // If \n is followed by a \r then advance the index in order to read a
      // BR entity
      // NOTE(review): this is a while loop, so a run of consecutive '\r'
      // characters after the '\n' is swallowed as a single BR — confirm
      // whether only one '\r' was intended.
      while ((index + 1 < endOffset) && (content.charAt(index + 1) == '\r')) index++;
    } // End if
    switch (state) {
        // It is the initial and also a final state
        // Stay in state 1 while it reads whitespaces
      case 1:
        {
          // If it reads a non-whitespace char then move to state 2 and record
          // the beginning of a paragraph
          if (!Character.isWhitespace(ch)) {
            state = 2;
            startOffsetPara = index;
          } // End if
        }
        break;
        // It can be also a final state.
      case 2:
        {
          // Stay in state 2 while reading chars != BR entities
          if (readBR) {
            // If a BR char was found go to state 3. The possible end of the
            // para can be index. This will be confirmed by state 3; this is
            // why the end of a para is recorded here.
            readBR = false;
            endOffsetPara = index;
            state = 3;
          } // End if
        }
        break;
        // It can be also a final state
        // From state 3 there are only 2 possible ways: (state 2 or state 1)
        // For state 1 it needs to read a BR (blank line => paragraph ends)
        // For state 2 it needs to read something different from a BR
      case 3:
        {
          if (readBR) {
            // A BR was read. Go to state 1
            readBR = false;
            state = 1;
            // Create an annotation of type paragraph for the completed para
            try {
              annotSet.add(
                  new Long(startOffsetPara),
                  new Long(endOffsetPara),
                  "paragraph",
                  Factory.newFeatureMap());
            } catch (gate.util.InvalidOffsetException ioe) {
              throw new DocumentFormatException(
                  "Coudn't create a paragraph" + " annotation", ioe);
            } // End try
          } else {
            // Go to state 2 and keep reading chars
            state = 2;
          } // End if
        }
        break;
    } // End switch
    // Prepare to read the next char.
    index++;
  } // End while
  // NOTE(review): this overwrites any end recorded in state 3, so the final
  // paragraph always extends to endOffset (possibly including the trailing
  // line break) — confirm this is intended.
  endOffsetPara = index;
  // Investigate where the finite automaton has stopped
  if (state == 2 || state == 3) {
    // The input ended mid-paragraph (or just after one BR): emit the final
    // paragraph annotation
    try {
      annotSet.add(
          new Long(startOffsetPara),
          // Create the final annotation using the endOffset
          new Long(endOffsetPara),
          "paragraph",
          Factory.newFeatureMap());
    } catch (gate.util.InvalidOffsetException ioe) {
      throw new DocumentFormatException("Coudn't create a paragraph" + " annotation", ioe);
    } // End try
  } // End if
} // End annotateParagraphs();