Example #1
  public List<List<SemSig>> convertToVectors(
      List<String> sentence, ItemType type, LKB lkb, int vSize) {
    List<List<SemSig>> firstVectors = new ArrayList<List<SemSig>>();
    Set<SemSig> firstVectorSet = new HashSet<SemSig>();

    // Sense-level input (offsets, sense keys, or word senses): each item resolves
    // to a single SemSig, so wrap every sense vector in a singleton list.
    if (type.equals(ItemType.SENSE_OFFSETS)
        || type.equals(ItemType.SENSE_KEYS)
        || type.equals(ItemType.WORD_SENSE)) {
      firstVectorSet =
          new HashSet<SemSig>(
              TextualSimilarity.getInstance()
                  .getSenseVectorsFromOffsetSentence(sentence, type, lkb, vSize));

      for (SemSig s : firstVectorSet) firstVectors.add(Arrays.asList(s));
    } else {
      // Surface or tagged text: delegate to the cooked-sentence path, where each
      // word may map to several candidate sense vectors.
      firstVectors =
          TextualSimilarity.getInstance().getSenseVectorsFromCookedSentence(sentence, lkb, vSize);
    }

    return firstVectors;
  }
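A minimal usage sketch (not part of the original class): it assumes it sits alongside convertToVectors above, so the types already in scope there (List, SemSig, ItemType, LKB) are available; the offset string and the vector size of 100 are illustrative values only.

  // Hypothetical caller: turn a sentence given as synset offsets into per-sense
  // vectors and report how many sense vectors were produced.
  public void printSenseVectorCount(LKB lkb) {
    List<String> sentence = Arrays.asList("02084071-n"); // illustrative WordNet-style offset
    List<List<SemSig>> vectors = convertToVectors(sentence, ItemType.SENSE_OFFSETS, lkb, 100);
    System.out.println("sense vectors: " + vectors.size());
  }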
Example #2
  public Pair<List<String>, List<String>> cookLexicalItem(
      String text, ItemType textType, boolean discardStopwords) {
    try {

      List<String> cookedSentence = new ArrayList<String>();
      Pair<List<String>, List<String>> out = new Pair<List<String>, List<String>>(null, null);

      // Process the input item by item: sense keys and word senses are mapped to
      // synset offsets, while offsets and tagged surface words are kept as-is.
      for (String item : Arrays.asList(text.split(" "))) {
        if (item.trim().length() == 0) continue;

        switch (textType) {
          case SENSE_OFFSETS:
            cookedSentence.add(item);
            break;

          case SENSE_KEYS:
            IWord sense = WordNetUtils.getInstance().getSenseFromSenseKey(item);
            cookedSentence.add(
                GeneralUtils.fixOffset(sense.getSynset().getOffset(), sense.getPOS()));
            break;

          case WORD_SENSE:
            IWord snse = WordNetUtils.getInstance().mapWordSenseToIWord(item);
            cookedSentence.add(GeneralUtils.fixOffset(snse.getSynset().getOffset(), snse.getPOS()));
            break;

          case SURFACE_TAGGED:
            cookedSentence.add(item);
            break;
        }
      }

      // Raw surface text is not handled in the loop above: it is cooked as a
      // whole sentence instead.
      if (textType.equals(ItemType.SURFACE)) {
        out = TextualSimilarity.getInstance().cookSentence(text);
        cookedSentence = out.first;
      }

      if (cookedSentence == null) cookedSentence = new ArrayList<String>();

      List<String> newCS = new ArrayList<String>();

      for (String s : cookedSentence) {
        // if it is a synset
        if (s.matches("[0-9]*\\-[anvr]")) {
          newCS.add(s);
          continue;
        }

        // Otherwise the item is expected to be of the form "word#pos"; keep it
        // only if the word exists in WordNet for that part of speech.
        String[] comps = s.split("#");
        String word = comps[0];
        String ps = comps[1];

        if (!TextualSimilarity.getInstance().isOOV(word, ps)) newCS.add(word + "#" + ps);
      }

      cookedSentence = newCS;

      return new Pair<List<String>, List<String>>(cookedSentence, out.second);
    } catch (Exception e) {
      e.printStackTrace();
    }

    return null;
  }
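A minimal usage sketch (again not part of the original class, and assuming it sits alongside cookLexicalItem so Pair and ItemType are in scope); the input sentence and the commented output are illustrative only.

  // Hypothetical caller: cook a raw surface sentence and print the resulting items.
  public void printCookedItems() {
    Pair<List<String>, List<String>> cooked =
        cookLexicalItem("a dog barked at the mailman", ItemType.SURFACE, true);
    if (cooked != null) {
      for (String item : cooked.first) {
        System.out.println(item); // e.g. "dog#n", "bark#v" (illustrative)
      }
    }
  }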
Example #3
  public double getSimilarity(
      String text1,
      String text2,
      DisambiguationMethod disMethod,
      SignatureComparison measure,
      ItemType srcTextType,
      ItemType trgTextType) {

    // pre-process sentence pair
    List<String> cookedSentence1 = cookLexicalItem(text1, srcTextType, discardStopwords).first;
    List<String> cookedSentence2 = cookLexicalItem(text2, trgTextType, discardStopwords).first;

    // Mirror POS tagging: only when enabled and at least one side is raw surface text.
    if (mirrorPOStagging
        && (srcTextType.equals(ItemType.SURFACE) || trgTextType.equals(ItemType.SURFACE))) {
      Pair<List<String>, List<String>> aPair = mirrorPosTags(cookedSentence1, cookedSentence2);

      cookedSentence1 = aPair.first;
      cookedSentence2 = aPair.second;
    }

    List<SemSig> srcSemSigs = new ArrayList<SemSig>();
    List<SemSig> trgSemSigs = new ArrayList<SemSig>();

    switch (disMethod) {
      case NONE:
        // No disambiguation: take the SemSigs of all candidate senses of every
        // word on both sides.
        srcSemSigs =
            SemSigProcess.getInstance()
                .getAllSemSigsFromWordPosList(cookedSentence1, srcTextType, testedVectorSize);
        trgSemSigs =
            SemSigProcess.getInstance()
                .getAllSemSigsFromWordPosList(cookedSentence2, trgTextType, testedVectorSize);
        break;

        // Alignment-based disambiguation: disambiguate the two texts against each
        // other and keep the SemSigs of the selected senses.
      case ALIGNMENT_BASED:
        Pair<List<SemSig>, List<SemSig>> disambiguatedPair =
            DisambiguateCookedSentence(
                cookedSentence1,
                cookedSentence2,
                srcTextType,
                trgTextType,
                LKB.WordNetGloss,
                alignmentMeasure,
                alignmentVecSize,
                true,
                true);

        srcSemSigs = disambiguatedPair.first;
        trgSemSigs = disambiguatedPair.second;

        break;
    }

    // Collapse each side to a single signature, averaging when there are several.
    SemSig srcSemSig =
        (srcSemSigs.size() == 1) ? srcSemSigs.get(0) : SemSigUtils.averageSemSigs(srcSemSigs);

    SemSig trgSemSig =
        (trgSemSigs.size() == 1) ? trgSemSigs.get(0) : SemSigUtils.averageSemSigs(trgSemSigs);

    return SemSigComparator.compare(
        srcSemSig.getVector(), trgSemSig.getVector(), measure, testedVectorSize, false, true);
  }
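A minimal usage sketch (not part of the original class): it assumes the method lives in the same class as above and that a concrete SignatureComparison implementation is supplied by the caller, since the available measures depend on the surrounding library; both sentences are made up for illustration.

  // Hypothetical caller: compare two raw sentences with alignment-based
  // disambiguation and print the resulting similarity score.
  public void printPairSimilarity(SignatureComparison measure) {
    double score =
        getSimilarity(
            "a dog barked at the mailman",
            "the postman was chased by a dog",
            DisambiguationMethod.ALIGNMENT_BASED,
            measure,
            ItemType.SURFACE,
            ItemType.SURFACE);
    System.out.println("similarity: " + score);
  }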