Example #1
 /**
  * Returns the head verb of the given verb annotation: the last contained POS
  * annotation that is not a preposition, or the verb itself when no POS
  * annotations are contained in it.
  */
 public static Annotation getHeadVerb(
     Annotation verb, Map<String, AnnotationSet> annotations, String text) {
   AnnotationSet verbs = annotations.get(Constants.POS).getContained(verb);
   if (verbs == null || verbs.size() < 1) return verb;
   // Start with the first contained annotation and keep the last non-preposition seen.
   Annotation result = verbs.getFirst();
   for (Annotation v : verbs) {
     if (!SyntaxUtils.isPreposition(v.getType())) {
       result = v;
     }
   }
   return result;
 }
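The same selection rule, reduced to a self-contained sketch that works on plain POS tag strings instead of the Reconcile Annotation/AnnotationSet types. The HeadVerbSketch class and the use of "IN" as the preposition tag are assumptions made for illustration only:

import java.util.List;

public class HeadVerbSketch {
  // Pick the last tag that is not a preposition; fall back to the first tag.
  static String headVerbTag(List<String> posTags) {
    if (posTags.isEmpty()) return null;
    String result = posTags.get(0);
    for (String tag : posTags) {
      if (!"IN".equals(tag)) { // "IN" standing in for SyntaxUtils.isPreposition is an assumption
        result = tag;
      }
    }
    return result;
  }

  public static void main(String[] args) {
    // "has been running across" -> VBZ VBN VBG IN: the head is the last non-preposition tag.
    System.out.println(headVerbTag(List.of("VBZ", "VBN", "VBG", "IN"))); // VBG
  }
}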
Example #2
 /**
  * Classifies the determiner type of a noun phrase. Proper names, pronouns,
  * possessives, and partitives ("most of ...") count as definite; explicit
  * quantifiers, cardinal numbers, and predeterminers count as quantified;
  * everything else defaults to indefinite.
  */
 public static ArticleTypeEnum articleType(Annotation np, Document doc) {
   String[] indefs = {"a", "an", "one"};
   // String[] defs = { "the", "this", "that", "these", "those" };
   String[] quans = {"every", "all", "some", "most", "few", "many", "much"};
   String[] words = doc.getWords(np);
   String first = words[0];
   if (ProperName.getValue(np, doc)) return ArticleTypeEnum.DEFINITE;
   if (isPronoun(np, doc)) return ArticleTypeEnum.DEFINITE;
   if (memberArray(first, indefs)) return ArticleTypeEnum.INDEFINITE;
   if (memberArray(first, quans)) {
     // partitives such as "most of the votes" behave like definite NPs
     if (words.length < 2 || !words[1].equalsIgnoreCase("of")) return ArticleTypeEnum.QUANTIFIED;
     return ArticleTypeEnum.DEFINITE;
   }
   AnnotationSet pos = doc.getAnnotationSet(Constants.POS);
   pos = pos.getContained(np);
   if (pos != null && pos.size() > 0) {
     if (isCardinalNumber(pos.getFirst()) || isPredeterminer(pos.getFirst()))
       return ArticleTypeEnum.QUANTIFIED;
     if (isPossesivePronoun(pos.getFirst()) || isProperNoun(pos.getLast()))
       return ArticleTypeEnum.DEFINITE;
   }
   return ArticleTypeEnum.INDEFINITE;
 }
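A minimal standalone sketch of the same branch order, driven only by surface tokens. The ArticleTypeSketch class and its enum are illustration-only assumptions; the proper-name, pronoun, and POS-based branches are omitted because they need real annotations:

import java.util.Arrays;
import java.util.List;

public class ArticleTypeSketch {
  enum ArticleType { DEFINITE, INDEFINITE, QUANTIFIED }

  static ArticleType classify(List<String> words) {
    List<String> indefs = Arrays.asList("a", "an", "one");
    List<String> quans = Arrays.asList("every", "all", "some", "most", "few", "many", "much");
    String first = words.get(0).toLowerCase();
    if (indefs.contains(first)) return ArticleType.INDEFINITE;
    if (quans.contains(first)) {
      // partitive "most of the ..." counts as definite, bare "many people" as quantified
      return (words.size() > 1 && words.get(1).equalsIgnoreCase("of"))
          ? ArticleType.DEFINITE
          : ArticleType.QUANTIFIED;
    }
    return ArticleType.INDEFINITE; // default, as in the full method
  }

  public static void main(String[] args) {
    System.out.println(classify(Arrays.asList("an", "apple")));        // INDEFINITE
    System.out.println(classify(Arrays.asList("most", "of", "them"))); // DEFINITE
    System.out.println(classify(Arrays.asList("many", "people")));     // QUANTIFIED
  }
}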
Example #3
  /**
   * Assigns each NP the number of the paragraph that contains it; NPs not
   * contained in any paragraph fall back to the last paragraph that starts
   * before them, or 0 if there is none.
   */
  @Override
  public Object produceValue(Annotation np, Document doc) {
    // Get the paragraph and NP annotations
    AnnotationSet par = doc.getAnnotationSet(Constants.PAR);
    AnnotationSet nps = doc.getAnnotationSet(Constants.NP);

    for (Annotation p : par) {
      int num;
      if (Constants.PAR_NUMS_UNAVAILABLE) {
        num = 0;
      } else {
        num = Integer.parseInt(p.getAttribute("parNum"));
      }
      AnnotationSet enclosed = nps.getContained(p);
      for (Annotation e : enclosed) {
        e.setProperty(this, num);
      }
    }

    // Make sure that all annotations have an associated PARNUM
    for (Annotation n : nps) {
      if (n.getProperty(this) == null) {
        AnnotationSet o = par.getOverlapping(0, n.getEndOffset());
        if (o == null || o.size() < 1) {
          n.setProperty(this, 0);
        } else {
          Annotation p = o.getLast();
          int num = Constants.PAR_NUMS_UNAVAILABLE ? 0 : Integer.parseInt(p.getAttribute("parNum"));
          n.setProperty(this, num);
        }
      }
    }

    if (np.getProperty(this) == null) {
      AnnotationSet o = par.getOverlapping(0, np.getEndOffset());
      if (o == null || o.size() < 1) {
        np.setProperty(this, 0);
      } else {
        Annotation p = o.getLast();
        int num = Constants.PAR_NUMS_UNAVAILABLE ? 0 : Integer.parseInt(p.getAttribute("parNum"));
        np.setProperty(this, num);
      }
    }

    return np.getProperty(this);
  }
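A compact sketch of the fallback rule in produceValue, with paragraphs and NPs reduced to plain offset spans and paragraphs assumed to be numbered by their position in the set. The Span record and ParNumSketch class are assumptions for illustration, not the Reconcile API:

import java.util.List;

public class ParNumSketch {
  record Span(int start, int end) {}

  // An NP takes the index of the last paragraph that starts before the NP's
  // end offset, or 0 if no paragraph does (mirrors getOverlapping(0, end) + getLast()).
  static int parNum(List<Span> paragraphs, Span np) {
    int num = 0;
    for (int i = 0; i < paragraphs.size(); i++) {
      if (paragraphs.get(i).start() < np.end()) num = i;
    }
    return num;
  }

  public static void main(String[] args) {
    List<Span> pars = List.of(new Span(0, 100), new Span(100, 220));
    System.out.println(parNum(pars, new Span(10, 20)));   // 0
    System.out.println(parNum(pars, new Span(150, 160))); // 1
    System.out.println(parNum(pars, new Span(230, 240))); // 1 (last paragraph before the NP)
  }
}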
  /**
   * Runs the external NE tagger over the document's sentences and stores the
   * resulting named-entity annotations on the document.
   */
  @Override
  public void run(Document doc, String[] annSetNames) {

    String tagChunk = currentConfig.getTagChunk();
    String listDir = currentConfig.getTagChunkLists();

    AnnotationSet namedEntities = new AnnotationSet(annSetNames[0]);

    // get the sentences from the input
    AnnotationSet sentSet = doc.getAnnotationSet(Constants.SENT);

    // get the tokens from each sentence
    AnnotationSet tokenSet = doc.getAnnotationSet(Constants.TOKEN);

    // Read in the text from the raw file
    String text = doc.getText();

    ArrayList<String> lines = new ArrayList<String>();
    ArrayList<Vector<Annotation>> tokenList = new ArrayList<Vector<Annotation>>();

    for (Annotation sent : sentSet) {
      Vector<Annotation> annVector = new Vector<Annotation>();
      int sentStart = sent.getStartOffset();
      int sentEnd = sent.getEndOffset();
      String sentText = Annotation.getAnnotText(sent, text);
      AnnotationSet sentTokens = tokenSet.getContained(sentStart, sentEnd);

      // gather all sentences to tag
      if (!sentText.matches("\\W+")) {
        StringBuilder tmp = new StringBuilder();
        for (Annotation a : sentTokens) {
          tmp.append(Annotation.getAnnotTextClean(a, text)).append(" ");
          annVector.add(a);
        }

        lines.add(tmp.toString());
        tokenList.add(annVector);
      }
    }

    // write out a tmp file that contains the words to be tagged
    File tmpFile = new File(doc.getRootDir(), "tmp.ner");
    tmpFile.deleteOnExit();
    // try-with-resources ensures the writer is closed even if a write fails
    try (BufferedWriter bw = new BufferedWriter(new FileWriter(tmpFile))) {
      for (String l : lines) {
        bw.write(l + "\n");
      }
    } catch (IOException ioe) {
      ioe.printStackTrace();
    }

    // run the tagger
    String command =
        tagChunk
            + " -predict . "
            + modelDir
            + Utils.SEPARATOR
            + models[0]
            + " "
            + tmpFile.getAbsolutePath()
            + " "
            + listDir;

    // collect the results
    ArrayList<String> results;
    int i = 0;
    try {
      results = Utils.runExternalCaptureOutput(command);
      Annotation current = null;
      for (String l : results) {
        Vector<Annotation> annVector = tokenList.get(i);

        // get rid of these extraneous tags
        l = l.replace("_O-O", "");
        String[] tokens = l.split(" ");
        // System.out.println(l);

        int j = 0;
        int underscore;
        int nes = 1;
        String tag;
        for (String t : tokens) {
          underscore = t.lastIndexOf('_');
          tag = t.substring(underscore + 1);
          Annotation a = annVector.get(j);
          // System.out.print(Utils.getAnnotTextClean(a, text) + "_" + tag + " ");

          if (tag.equals("B-O")) {
            j++;
            if (current != null) {
              namedEntities.add(current);
              // System.out.println("NE Found: " + Utils.getAnnotTextClean(current, text) + ":" +
              // current.getType());
              nes++;
              current = null;
            }

            continue;
          }

          // map the tagger's short labels to the full type names used downstream
          String entityType = tag.substring(tag.indexOf('-') + 1);

          if (entityType.equals("ORG")) {
            entityType = "ORGANIZATION";
          } else if (entityType.equals("LOC")) {
            entityType = "LOCATION";
          } else if (entityType.equals("PER")) {
            entityType = "PERSON";
          } else if (entityType.equals("VEH")) {
            entityType = "VEHICLE";
          }

          if (tag.startsWith("B-")) {
            if (current != null) {
              namedEntities.add(current);
              nes++;
              current = null;
              // System.out.println("NE Found: " + Utils.getAnnotTextClean(current, text));
            }

            current = new Annotation(nes, a.getStartOffset(), a.getEndOffset(), entityType);
          } else if (tag.startsWith("I-")) {
            if (current != null) {
              current.setEndOffset(a.getEndOffset());
            } else {
              current = new Annotation(nes, a.getStartOffset(), a.getEndOffset(), entityType);
            }
          }

          j++;
        }

        // System.out.println();
        i++;
      }

      // flush an entity that is still open after the last line of tagger output
      if (current != null) {
        namedEntities.add(current);
      }

      FileUtils.delete(tmpFile);
    } catch (IOException | InterruptedException e) {
      throw new RuntimeException(e);
    }

    addResultSet(doc, namedEntities);
  }
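The tag-handling loop in run is a standard BIO merge. Below is a self-contained sketch of just that merge, with tokens reduced to (text, tag) pairs; the Token and Entity records and the BioMergeSketch class are assumptions for illustration, not part of the Reconcile API:

import java.util.ArrayList;
import java.util.List;

public class BioMergeSketch {
  record Token(String text, String tag) {}   // e.g. ("Obama", "B-PER")
  record Entity(String type, String text) {}

  // B-X starts a new entity, I-X extends the current one (or starts one if none
  // is open, as in the loop above), and B-O / O closes whatever is open.
  static List<Entity> merge(List<Token> tokens) {
    List<Entity> entities = new ArrayList<>();
    String type = null;
    StringBuilder text = new StringBuilder();
    for (Token t : tokens) {
      if (t.tag().startsWith("B-") && !t.tag().equals("B-O")) {
        if (type != null) entities.add(new Entity(type, text.toString()));
        type = t.tag().substring(2);
        text = new StringBuilder(t.text());
      } else if (t.tag().startsWith("I-")) {
        if (type == null) {
          type = t.tag().substring(2);
          text = new StringBuilder(t.text());
        } else {
          text.append(' ').append(t.text());
        }
      } else if (type != null) {
        entities.add(new Entity(type, text.toString()));
        type = null;
      }
    }
    if (type != null) entities.add(new Entity(type, text.toString())); // flush the last open entity
    return entities;
  }

  public static void main(String[] args) {
    List<Token> tokens = List.of(
        new Token("Barack", "B-PER"), new Token("Obama", "I-PER"),
        new Token("visited", "B-O"), new Token("Paris", "B-LOC"));
    System.out.println(merge(tokens)); // [Entity[type=PER, text=Barack Obama], Entity[type=LOC, text=Paris]]
  }
}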