Example #1
  // Gain(e) = negative risk = \sum_{e'} G(e, e')P(e')
  // curHyp: e
  // trueHyp: e'
  public static double computeExpectedGain(
      String curHyp, List<String> nbestHyps, List<Double> nbestProbs) {
    // ### get normalization constant, remember features, remember the combined linear score
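    // Note: doNgramClip and bleuOrder are fields of the enclosing class (presumably whether
    // to clip n-gram match counts and the maximum n-gram order used for sentence-level BLEU).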
    double gain = 0;

    for (int i = 0; i < nbestHyps.size(); i++) {
      String trueHyp = nbestHyps.get(i);
      double trueProb = nbestProbs.get(i);
      gain += trueProb * BLEU.computeSentenceBleu(trueHyp, curHyp, doNgramClip, bleuOrder);
    }
    // System.out.println("Gain is " + gain);
    return gain;
  }
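For context, this variant works on raw hypothesis strings, so BLEU statistics are recomputed for every (e, e') pair. The method below is a hypothetical caller, not part of the original source: it is meant to sit in the same class as Example #1 (so computeExpectedGain is in scope), assumes java.util.List and java.util.ArrayList are imported, and assumes nbestScores holds the raw linear model scores, which are converted into posteriors P(e') with a scaled softmax before reranking.

  // Hypothetical caller: normalize n-best model scores into posteriors P(e')
  // and pick the hypothesis with the highest expected gain (minimum risk).
  public static int selectMinRiskHypothesis(
      List<String> nbestHyps, List<Double> nbestScores, double scalingFactor) {
    // Scaled softmax over the linear model scores gives P(e').
    List<Double> nbestProbs = new ArrayList<>();
    double normConstant = 0;
    for (double score : nbestScores) {
      double unnormalized = Math.exp(scalingFactor * score);
      nbestProbs.add(unnormalized);
      normConstant += unnormalized;
    }
    for (int i = 0; i < nbestProbs.size(); i++) {
      nbestProbs.set(i, nbestProbs.get(i) / normConstant);
    }

    // Rerank: keep the candidate e with the largest expected gain.
    int bestIndex = -1;
    double bestGain = Double.NEGATIVE_INFINITY;
    for (int i = 0; i < nbestHyps.size(); i++) {
      double gain = computeExpectedGain(nbestHyps.get(i), nbestHyps, nbestProbs);
      if (gain > bestGain) {
        bestGain = gain;
        bestIndex = i;
      }
    }
    return bestIndex;
  }

Reranking this way costs on the order of N^2 sentence-BLEU computations for an N-best list, which is the motivation for the table-based overload in Example #2.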
Example #2
  // Gain(e) = negative risk = \sum_{e'} G(e, e')P(e')
  // curHyp: e
  // trueHyp: e'
  public double computeExpectedGain(
      int curHypLen,
      HashMap<String, Integer> curHypNgramTbl,
      List<HashMap<String, Integer>> ngramTbls,
      List<Integer> sentLens,
      List<Double> nbestProbs) {

    // ### get normalization constant, remember features, remember the combined linear score
    double gain = 0;

    for (int i = 0; i < nbestProbs.size(); i++) {
      HashMap<String, Integer> trueHypNgramTbl = ngramTbls.get(i);
      double trueProb = nbestProbs.get(i);
      int trueLen = sentLens.get(i);
      gain +=
          trueProb
              * BLEU.computeSentenceBleu(
                  trueLen, trueHypNgramTbl, curHypLen, curHypNgramTbl, doNgramClip, bleuOrder);
    }
    // System.out.println("Gain is " + gain);
    return gain;
  }
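Example #2 moves the per-hypothesis work out of the inner loop: each hypothesis's length and n-gram count table are computed once and reused across all pairwise BLEU evaluations. Joshua's BLEU class provides its own table construction; the helper below is a hypothetical stand-in (not from the original source, and the exact key format expected by BLEU.computeSentenceBleu may differ) that shows the expected shape of the data, assuming whitespace-tokenized hypotheses and an imported java.util.HashMap.

  // Hypothetical helper: count all n-grams of the hypothesis up to bleuOrder,
  // keyed by their space-joined surface form, so the table (and the sentence
  // length) can be built once and reused for every (e, e') comparison.
  public static HashMap<String, Integer> buildNgramTable(String hyp, int bleuOrder) {
    HashMap<String, Integer> ngramTbl = new HashMap<>();
    String[] words = hyp.split("\\s+");
    for (int n = 1; n <= bleuOrder; n++) {
      for (int start = 0; start + n <= words.length; start++) {
        StringBuilder ngram = new StringBuilder();
        for (int k = 0; k < n; k++) {
          if (k > 0) ngram.append(' ');
          ngram.append(words[start + k]);
        }
        ngramTbl.merge(ngram.toString(), 1, Integer::sum);
      }
    }
    return ngramTbl;
  }

Precomputing the tables turns an N-best rerank into N table constructions plus N^2 table comparisons, instead of re-tokenizing both hypotheses on every call.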