Example #1
  @Test
  public void testInclinedPlane() throws IOException {
    DoubleMatrix1D normal = new DenseDoubleMatrix1D(3);
    normal.assign(new double[] {.0, .0, 1.0});

    InclinedPlane3D inclinedPlane = new InclinedPlane3D();
    inclinedPlane.setRandomGenerator(new MersenneTwister(123456789));
    inclinedPlane.setNormal(normal);
    inclinedPlane.setBounds(new Rectangle(-5, -5, 10, 10));
    inclinedPlane.setNoiseStd(0.5);
    DoubleMatrix2D data = inclinedPlane.generate(10);

    SVDPCA pca = new SVDPCA(data);

    System.out.println("Eigenvalues:");
    System.out.println(pca.getEigenvalues());

    System.out.println("Eigenvectors:");
    System.out.println(pca.getEigenvectors());

    System.out.println("Meanvector:");
    System.out.println(pca.getMean());

    // Recalculate the input from a truncated SVD, first calculate the mean
    DoubleMatrix1D mean = new SparseDoubleMatrix1D(3);
    for (int i = 0; i < data.rows(); ++i) {
      mean.assign(data.viewRow(i), Functions.plus);
    }
    mean.assign(Functions.div(data.rows()));

    // Truncate the SVD and calculate the coefficient matrix
    DenseDoubleMatrix2D coefficients = new DenseDoubleMatrix2D(data.rows(), 2);
    DoubleMatrix2D centeredInput = data.copy();
    for (int i = 0; i < data.rows(); ++i) {
      centeredInput.viewRow(i).assign(mean, Functions.minus);
    }
    centeredInput.zMult(
        pca.getEigenvectors().viewPart(0, 0, 2, 3), coefficients, 1, 0, false, true);

    // Reconstruct the data from the lower dimensional information
    DoubleMatrix2D reconstruction = data.copy();
    for (int i = 0; i < reconstruction.rows(); ++i) {
      reconstruction.viewRow(i).assign(mean);
    }
    coefficients.zMult(
        pca.getEigenvectors().viewPart(0, 0, 2, 3), reconstruction, 1, 1, false, false);

    // Output to file (can be read by GNU Plot)
    String fileName = "inclined-plane-svd-pca.dat";
    String packagePath = this.getClass().getPackage().getName().replaceAll("\\.", "/");
    File outputFile = new File("src/test/resources/" + packagePath + "/" + fileName);
    try (PrintWriter writer = new PrintWriter(outputFile)) {
      writer.write(data.toString());
    }
  }
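
Since the test only prints and serializes the data, a quick way to sanity-check the rank-2 reconstruction is to measure its Frobenius-norm error against the original input. A minimal sketch, assuming the same Colt imports as above plus cern.colt.matrix.linalg.Algebra:

    // Hedged sketch: quantify how much information the rank-2 truncation lost.
    DoubleMatrix2D residual = data.copy();
    residual.assign(reconstruction, Functions.minus);  // residual = data - reconstruction
    double error = Algebra.DEFAULT.normF(residual);    // Frobenius norm of the residual
    System.out.println("Reconstruction error (Frobenius): " + error);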
Example #2

  private double multiLL(DoubleMatrix2D coeffs, Node dep, List<Node> indep) {

    DoubleMatrix2D indepData =
        factory2D.make(internalData.subsetColumns(indep).getDoubleData().toArray());
    List<Node> depList = new ArrayList<>();
    depList.add(dep);
    DoubleMatrix2D depData =
        factory2D.make(internalData.subsetColumns(depList).getDoubleData().toArray());

    int N = indepData.rows();
    DoubleMatrix2D probs =
        Algebra.DEFAULT.mult(factory2D.appendColumns(factory2D.make(N, 1, 1.0), indepData), coeffs);

    probs =
        factory2D
            .appendColumns(factory2D.make(indepData.rows(), 1, 1.0), probs)
            .assign(Functions.exp);
    double ll = 0;
    for (int i = 0; i < N; i++) {
      DoubleMatrix1D curRow = probs.viewRow(i);
      curRow.assign(Functions.div(curRow.zSum()));
      ll += Math.log(curRow.get((int) depData.get(i, 0)));
    }
    return ll;
  }
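
In the standard multinomial-logit convention the reference category gets score 0, so its unnormalized weight is exp(0) = 1; note that the code above exponentiates the prepended column of ones as well, giving the reference category weight exp(1) instead, which is consistent only if coeffs was fit under the same convention. A minimal sketch of the usual row normalization (the score values are illustrative, not from the original):

    // Illustrative only: score 0 for the reference category, two fitted scores.
    DoubleMatrix1D row = new DenseDoubleMatrix1D(new double[] {0.0, 1.2, -0.3});
    row.assign(Functions.exp);              // {exp(0)=1, exp(1.2), exp(-0.3)}
    row.assign(Functions.div(row.zSum()));  // normalize: entries now sum to 1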
Example #3
  static boolean computeLogMi(
      FeatureGenerator featureGen,
      double lambda[],
      DoubleMatrix2D Mi_YY,
      DoubleMatrix1D Ri_Y,
      boolean takeExp,
      boolean reuseM,
      boolean initMDone) {

    if (reuseM && initMDone) {
      Mi_YY = null;
    } else {
      initMDone = false;
    }
    if (Mi_YY != null) Mi_YY.assign(0);
    Ri_Y.assign(0);
    while (featureGen.hasNext()) {
      Feature feature = featureGen.next();
      int f = feature.index();
      int yp = feature.y();
      int yprev = feature.yprev();
      float val = feature.value();
      // System.out.println(feature.toString());

      if (yprev < 0) {
        // this is a single state feature.
        double oldVal = Ri_Y.getQuick(yp);
        Ri_Y.setQuick(yp, oldVal + lambda[f] * val);
      } else if (Mi_YY != null) {
        Mi_YY.setQuick(yprev, yp, Mi_YY.getQuick(yprev, yp) + lambda[f] * val);
        initMDone = true;
      }
    }
    if (takeExp) {
      for (int r = Ri_Y.size() - 1; r >= 0; r--) {
        Ri_Y.setQuick(r, expE(Ri_Y.getQuick(r)));
        if (Mi_YY != null)
          for (int c = Mi_YY.columns() - 1; c >= 0; c--) {
            Mi_YY.setQuick(r, c, expE(Mi_YY.getQuick(r, c)));
          }
      }
    }
    return initMDone;
  }
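
Mi_YY holds edge potentials indexed by (yprev, y) and Ri_Y holds node potentials, so a consumer typically folds them into the forward recursion alpha_i(y) = Ri_Y(y) * sum_{y'} alpha_{i-1}(y') * Mi_YY(y', y); Examples #7 and #9 below do exactly this through RobustMath.Mult. A hedged stand-alone sketch with illustrative names, assuming Mi_YY and Ri_Y were filled by the method above with takeExp = true:

    int numY = 3;                                   // illustrative label count
    DoubleMatrix1D alphaPrev = new DenseDoubleMatrix1D(numY);
    alphaPrev.assign(1);                            // uniform start, as in Example #7
    DoubleMatrix1D alphaNext = new DenseDoubleMatrix1D(numY);
    Mi_YY.zMult(alphaPrev, alphaNext, 1, 0, true);  // transposed: sums over yprev
    alphaNext.assign(Ri_Y, Functions.mult);         // elementwise node potentials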
Example #4
  /**
   * Predict the value of an instance.
   *
   * @param x instance
   * @return value of prediction
   */
  public double prediction(I x) {
    double pred = b;

    DoubleMatrix1D xm = new DenseDoubleMatrix1D(m.columns());
    pred +=
        x.operate(
            (i, xi) -> {
              double wi = w.getQuick(i);
              DoubleMatrix1D mi = m.viewRow(i);

              xm.assign(mi, (r, s) -> r + xi * s);

              return xi * wi - 0.5 * xi * xi * mi.zDotProduct(mi);
            },
            (v1, v2) -> v1 + v2);

    pred += 0.5 * xm.zDotProduct(xm);

    return pred;
  }
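
This is the standard factorization-machine trick: the pairwise interaction term sum_{i<j} x_i x_j <m_i, m_j> equals 0.5 * (||sum_i x_i m_i||^2 - sum_i x_i^2 ||m_i||^2), which is what the accumulator xm and the -0.5 * xi * xi * <m_i, m_i> term compute in O(n*k) instead of O(n^2*k). A quick sketch of the naive form it replaces (the matrix m and vector x here are made up for illustration):

  DoubleMatrix2D m = DoubleFactory2D.dense.random(4, 3);  // 4 features, k = 3 factors
  double[] x = {1.0, 0.0, 2.0, 0.5};                      // illustrative instance
  double pairwise = 0;
  for (int i = 0; i < x.length; i++)
    for (int j = i + 1; j < x.length; j++)
      pairwise += x[i] * x[j] * m.viewRow(i).zDotProduct(m.viewRow(j));
  // pairwise matches 0.5 * (||sum_i x_i m_i||^2 - sum_i x_i^2 ||m_i||^2)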
Example #5

  /**
   * Returns the best cut of a graph w.r.t. the degree of dissimilarity between points of different
   * partitions and the degree of similarity between points of the same partition.
   *
   * @param W the weight matrix of the graph
   * @return an array of two elements, each of these contains the points of a partition
   */
  protected static int[][] bestCut(DoubleMatrix2D W) {
    int n = W.columns();
    // Builds the diagonal matrices D and D^(-1/2) (represented as their diagonals)
    DoubleMatrix1D d = DoubleFactory1D.dense.make(n);
    DoubleMatrix1D d_minus_1_2 = DoubleFactory1D.dense.make(n);
    for (int i = 0; i < n; i++) {
      double d_i = W.viewRow(i).zSum();
      d.set(i, d_i);
      d_minus_1_2.set(i, 1 / Math.sqrt(d_i));
    }
    DoubleMatrix2D D = DoubleFactory2D.sparse.diagonal(d);

    // System.out.println("DoubleMatrix2D :\n"+D.toString());

    DoubleMatrix2D X = D.copy();

    // System.out.println("DoubleMatrix2D copy :\n"+X.toString());

    // X = D^(-1/2) * (D - W) * D^(-1/2)
    X.assign(W, Functions.minus);
    // System.out.println("DoubleMatrix2D X: (D-W) :\n"+X.toString());
    for (int i = 0; i < n; i++)
      for (int j = 0; j < n; j++)
        X.set(i, j, X.get(i, j) * d_minus_1_2.get(i) * d_minus_1_2.get(j));

    // Computes the eigenvalues and the eigenvectors of X
    EigenvalueDecomposition e = new EigenvalueDecomposition(X);
    DoubleMatrix1D lambda = e.getRealEigenvalues();

    // Selects the eigenvector z_2 associated with the second smallest eigenvalue (in absolute value)
    // Creates a map that contains the pairs <index, eigenvalue>
    AbstractIntDoubleMap map = new OpenIntDoubleHashMap(n);
    for (int i = 0; i < n; i++) map.put(i, Math.abs(lambda.get(i)));
    IntArrayList list = new IntArrayList();
    // Sorts the map on the value
    map.keysSortedByValue(list);
    // Gets the index of the second smallest element
    int i_2 = list.get(1);

    // y_2 = D^(-1/2) * z_2
    DoubleMatrix1D y_2 = e.getV().viewColumn(i_2).copy();
    y_2.assign(d_minus_1_2, Functions.mult);

    // Creates a map that contains the pairs <i, y_2[i]>
    map.clear();
    for (int i = 0; i < n; i++) map.put(i, y_2.get(i));
    // Sorts the map on the value
    map.keysSortedByValue(list);
    // Searches the previously sorted map for the element that minimizes the cut
    // of the partition
    double best_cut = Double.POSITIVE_INFINITY;
    int[][] partition = new int[2][];

    // The array v contains all the elements of the graph ordered by their
    // projection on vector y_2
    int[] v = list.elements();
    // For each admissible splitting point i
    for (int i = 1; i < n; i++) {
      // The array a contains all the elements that have a projection on vector
      // y_2 less or equal to the one of i-th element
      // The array b contains the remaining elements
      int[] a = new int[i];
      int[] b = new int[n - i];
      System.arraycopy(v, 0, a, 0, i);
      System.arraycopy(v, i, b, 0, n - i);
      double cut = Ncut(W, a, b, v);
      if (cut < best_cut) {
        best_cut = cut;
        partition[0] = a;
        partition[1] = b;
      }
    }

    // System.out.println("Partition:");
    // UtilsJS.printMatrix(partition);

    return partition;
  }
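
bestCut relies on an Ncut helper that is not part of this listing. A plausible sketch following Shi and Malik's definition, Ncut(A, B) = cut(A, B)/assoc(A, V) + cut(A, B)/assoc(B, V); the real implementation may differ:

  // Hypothetical sketch of the Ncut helper referenced above.
  protected static double Ncut(DoubleMatrix2D W, int[] a, int[] b, int[] v) {
    double cut = 0;                 // total weight of edges crossing the partition
    for (int i : a) for (int j : b) cut += W.get(i, j);
    double assocA = 0, assocB = 0;  // each side's total connection to the whole graph
    for (int i : a) for (int j : v) assocA += W.get(i, j);
    for (int i : b) for (int j : v) assocB += W.get(i, j);
    return cut / assocA + cut / assocB;
  }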
Example #6

  /**
   * Returns the squared Euclidean distance between two points. It is used to compute the degree
   * of similarity between them.
   *
   * @param x the first point
   * @param y the second point
   * @return the squared Euclidean distance between the points
   */
  protected static double distnorm2(DoubleMatrix1D x, DoubleMatrix1D y) {
    DoubleMatrix1D z = x.copy();
    z.assign(y, Functions.minus);  // z = x - y
    return z.zDotProduct(z);       // ||x - y||^2; no square root is taken
  }
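
One common way to turn this squared distance into an edge weight for the matrix W consumed by bestCut is a Gaussian kernel. This is an assumption for illustration (the bandwidth sigma2 and the call site are hypothetical, and distnorm2 must be visible from the calling class):

  DoubleMatrix1D p = new DenseDoubleMatrix1D(new double[] {0, 0});
  DoubleMatrix1D q = new DenseDoubleMatrix1D(new double[] {3, 4});
  double sigma2 = 1.0;                             // hypothetical bandwidth
  double w = Math.exp(-distnorm2(p, q) / sigma2);  // distnorm2(p, q) == 25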
Example #7
  public void compute() {

    alpha_Y.assign(1);
    initMDone = false;
    boolean doScaling = params.doScaling;

    if ((beta_Y == null) || (beta_Y.length < dataSeq.length())) {
      beta_Y = new DenseDoubleMatrix1D[2 * dataSeq.length()];
      for (int i = 0; i < beta_Y.length; i++) beta_Y[i] = new DenseDoubleMatrix1D(numY);

      scale = new double[2 * dataSeq.length()];
      scale[dataSeq.length() - 1] = (doScaling) ? numY : 1;
      beta_Y[dataSeq.length() - 1].assign(1.0 / scale[dataSeq.length() - 1]);
    }
    beta.add(beta_Y[dataSeq.length() - 1]);
    // System.out.println("Beta "+beta_Y[3].toString());
    for (int i = dataSeq.length() - 1; i > 0; i--) {
      if (params.debugLvl > 2) {
        Util.printDbg("Features fired");
        // featureGenerator.startScanFeaturesAt(dataSeq, i);
        // while (featureGenerator.hasNext()) {
        // Feature feature = featureGenerator.next();
        // Util.printDbg(feature.toString());
        // }
      }

      // compute the Mi matrix
      // System.out.println("MI previous" +Mi_YY.toString());
      initMDone =
          Trainer.computeLogMi(
              featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, true, reuseM, initMDone);
      // System.out.println("MI "+Mi_YY.toString());
      tmp_Y.assign(beta_Y[i]);
      tmp_Y.assign(Ri_Y, multFunc);
      RobustMath.Mult(Mi_YY, tmp_Y, beta_Y[i - 1], 1, 0, false, edgeGen);

      // need to scale the beta-s to avoid overflow
      scale[i - 1] = doScaling ? beta_Y[i - 1].zSum() : 1;
      if ((scale[i - 1] < 1) && (scale[i - 1] > -1)) scale[i - 1] = 1;
      constMultiplier.multiplicator = 1.0 / scale[i - 1];
      beta_Y[i - 1].assign(constMultiplier);
      // System.out.println("Beta "+beta_Y[i - 1].toString() + " ");
      beta.add(beta_Y[i - 1]);
    }

    double thisSeqLogli = 0;
    System.out.println("\n");
    // Mi_YY.assign(0);
    alpha_temp = new DenseDoubleMatrix1D[2 * dataSeq.length()];
    for (int i = 0; i < dataSeq.length(); i++) alpha_temp[i] = new DenseDoubleMatrix1D(numY);

    for (int i = 0; i < dataSeq.length(); i++) {
      // compute the Mi matrix
      //
      initMDone =
          Trainer.computeLogMi(
              featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, true, reuseM, initMDone);
      // System.out.println("MI: " + Mi_YY.toString());
      // find features that fire at this position..
      featureGenerator.startScanFeaturesAt(dataSeq, i);

      if (i > 0) {
        tmp_Y.assign(alpha_Y);
        RobustMath.Mult(Mi_YY, tmp_Y, newAlpha_Y, 1, 0, true, edgeGen);
        // Mi_YY.zMult(tmp_Y, newAlpha_Y,1,0,true);
        newAlpha_Y.assign(Ri_Y, multFunc);
      } else {
        newAlpha_Y.assign(Ri_Y);
      }
      while (featureGenerator.hasNext()) {
        Feature feature = featureGenerator.next();
        int f = feature.index();

        int yp = feature.y();
        int yprev = feature.yprev();
        float val = feature.value();
        if ((dataSeq.y(i) == yp)
            && (((i - 1 >= 0) && (yprev == dataSeq.y(i - 1))) || (yprev < 0))) {

          thisSeqLogli += val * lambda[f];
        }
      }

      alpha_Y.assign(newAlpha_Y);
      // now scale the alpha-s to avoid overflow problems.
      constMultiplier.multiplicator = 1.0 / scale[i];
      alpha_Y.assign(constMultiplier);
      alpha_temp[i].assign(newAlpha_Y);
      alpha_temp[i].assign(constMultiplier);
      // System.out.println("ALpha "+alpha_Y.toString());
      alpha.add(alpha_temp[i]);
      if (params.debugLvl > 2) {
        System.out.println("Alpha-i " + alpha_Y.toString());
        System.out.println("Ri " + Ri_Y.toString());
        System.out.println("Mi " + Mi_YY.toString());
        System.out.println("Beta-i " + beta_Y[i].toString());
      }
    }

    Zx = alpha_Y.zSum();
    System.out.println("Zx: " + Zx);
  } /* end of compute */
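
Because every alpha_Y was rescaled by 1/scale[i], the Zx printed here is the scaled sum, not the true partition function. The true value is recoverable from the scale array; this is exactly the correction Example #9 applies to its per-sequence log-likelihood. A short sketch:

    double logZ = Math.log(alpha_Y.zSum());  // log of the scaled sum
    for (int i = 0; i < dataSeq.length(); i++)
      logZ += Math.log(scale[i]);            // undo the per-position scaling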
Example #8
  protected double computeFunctionGradientLL(double lambda[], double grad[]) {
    double logli = 0;
    try {
      for (int f = 0; f < lambda.length; f++) {
        grad[f] = -1 * lambda[f] * params.invSigmaSquare;
        logli -= ((lambda[f] * lambda[f]) * params.invSigmaSquare) / 2;
      }
      diter.startScan();
      if (featureGenCache != null) featureGenCache.startDataScan();
      for (int numRecord = 0; diter.hasNext(); numRecord++) {
        DataSequence dataSeq = (DataSequence) diter.next();
        if (featureGenCache != null) featureGenCache.nextDataIndex();
        if (params.debugLvl > 1) {
          Util.printDbg("Read next seq: " + numRecord + " logli " + logli);
        }
        alpha_Y.assign(0);
        for (int f = 0; f < lambda.length; f++) ExpF[f] = RobustMath.LOG0;

        if ((beta_Y == null) || (beta_Y.length < dataSeq.length())) {
          beta_Y = new DenseDoubleMatrix1D[2 * dataSeq.length()];
          for (int i = 0; i < beta_Y.length; i++) beta_Y[i] = new DenseDoubleMatrix1D(numY);
        }
        // compute beta values in a backward scan.
        // also scale beta-values to 1 to avoid numerical problems.
        beta_Y[dataSeq.length() - 1].assign(0);
        for (int i = dataSeq.length() - 1; i > 0; i--) {
          if (params.debugLvl > 2) {
            /*  Util.printDbg("Features fired");
            featureGenerator.startScanFeaturesAt(dataSeq, i);
            while (featureGenerator.hasNext()) {
            Feature feature = featureGenerator.next();
            Util.printDbg(feature.toString());
            }
            */
          }

          // compute the Mi matrix
          initMDone =
              computeLogMi(
                  featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, false, reuseM, initMDone);
          tmp_Y.assign(beta_Y[i]);
          tmp_Y.assign(Ri_Y, sumFunc);
          RobustMath.logMult(Mi_YY, tmp_Y, beta_Y[i - 1], 1, 0, false, edgeGen);
        }

        double thisSeqLogli = 0;
        for (int i = 0; i < dataSeq.length(); i++) {
          // compute the Mi matrix
          initMDone =
              computeLogMi(
                  featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, false, reuseM, initMDone);
          // find features that fire at this position..
          featureGenerator.startScanFeaturesAt(dataSeq, i);

          if (i > 0) {
            tmp_Y.assign(alpha_Y);
            RobustMath.logMult(Mi_YY, tmp_Y, newAlpha_Y, 1, 0, true, edgeGen);
            newAlpha_Y.assign(Ri_Y, sumFunc);
          } else {
            newAlpha_Y.assign(Ri_Y);
          }

          while (featureGenerator.hasNext()) {
            Feature feature = featureGenerator.next();
            int f = feature.index();

            int yp = feature.y();
            int yprev = feature.yprev();
            float val = feature.value();

            if ((dataSeq.y(i) == yp)
                && (((i - 1 >= 0) && (yprev == dataSeq.y(i - 1))) || (yprev < 0))) {
              grad[f] += val;
              thisSeqLogli += val * lambda[f];
              if (params.debugLvl > 2) {
                System.out.println("Feature fired " + f + " " + feature);
              }
            }

            if (yprev < 0) {
              ExpF[f] =
                  RobustMath.logSumExp(
                      ExpF[f], newAlpha_Y.get(yp) + RobustMath.log(val) + beta_Y[i].get(yp));
            } else {
              ExpF[f] =
                  RobustMath.logSumExp(
                      ExpF[f],
                      alpha_Y.get(yprev)
                          + Ri_Y.get(yp)
                          + Mi_YY.get(yprev, yp)
                          + RobustMath.log(val)
                          + beta_Y[i].get(yp));
            }
          }
          alpha_Y.assign(newAlpha_Y);

          if (params.debugLvl > 2) {
            System.out.println("Alpha-i " + alpha_Y.toString());
            System.out.println("Ri " + Ri_Y.toString());
            System.out.println("Mi " + Mi_YY.toString());
            System.out.println("Beta-i " + beta_Y[i].toString());
          }
        }
        double lZx = RobustMath.logSumExp(alpha_Y);
        thisSeqLogli -= lZx;
        logli += thisSeqLogli;
        // update grad.
        for (int f = 0; f < grad.length; f++) {
          grad[f] -= RobustMath.exp(ExpF[f] - lZx);
        }
        if (params.debugLvl > 1) {
          System.out.println(
              "Sequence "
                  + thisSeqLogli
                  + " logli "
                  + logli
                  + " log(Zx) "
                  + lZx
                  + " Zx "
                  + Math.exp(lZx));
        }
      }
      if (params.debugLvl > 2) {
        for (int f = 0; f < lambda.length; f++) System.out.print(lambda[f] + " ");
        System.out.println(" :x");
        for (int f = 0; f < lambda.length; f++) System.out.print(grad[f] + " ");
        System.out.println(" :g");
      }

      if (params.debugLvl > 0)
        Util.printDbg(
            "Iteration "
                + icall
                + " log-likelihood "
                + logli
                + " norm(grad logli) "
                + norm(grad)
                + " norm(x) "
                + norm(lambda));

    } catch (Exception e) {
      System.out.println("Alpha-i " + alpha_Y.toString());
      System.out.println("Ri " + Ri_Y.toString());
      System.out.println("Mi " + Mi_YY.toString());

      e.printStackTrace();
      System.exit(0);
    }
    return logli;
  }
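
This variant keeps alpha, beta, and ExpF entirely in the log domain via RobustMath.logSumExp, whereas Example #9 below works with scaled linear-domain values instead. In both cases the gradient is empirical feature counts minus expected counts minus the Gaussian-prior term lambda[f] * invSigmaSquare. A minimal numerically stable logSumExp, shown only to make the recurrence concrete (RobustMath's actual implementation may differ):

  static double logSumExp(double a, double b) {
    double m = Math.max(a, b);
    if (m == Double.NEGATIVE_INFINITY) return m;  // log(0 + 0) = -infinity
    return m + Math.log(Math.exp(a - m) + Math.exp(b - m));
  }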
Example #9
  protected double computeFunctionGradient(double lambda[], double grad[]) {
    initMDone = false;

    if (params.trainerType.equals("ll")) return computeFunctionGradientLL(lambda, grad);
    double logli = 0;
    try {
      for (int f = 0; f < lambda.length; f++) {
        grad[f] = -1 * lambda[f] * params.invSigmaSquare;
        logli -= ((lambda[f] * lambda[f]) * params.invSigmaSquare) / 2;
      }
      boolean doScaling = params.doScaling;

      diter.startScan();
      if (featureGenCache != null) featureGenCache.startDataScan();
      int numRecord = 0;
      for (numRecord = 0; diter.hasNext(); numRecord++) {
        DataSequence dataSeq = (DataSequence) diter.next();
        if (featureGenCache != null) featureGenCache.nextDataIndex();
        if (params.debugLvl > 1) {
          Util.printDbg("Read next seq: " + numRecord + " logli " + logli);
        }
        alpha_Y.assign(1);
        for (int f = 0; f < lambda.length; f++) ExpF[f] = 0;

        if ((beta_Y == null) || (beta_Y.length < dataSeq.length())) {
          beta_Y = new DenseDoubleMatrix1D[2 * dataSeq.length()];
          for (int i = 0; i < beta_Y.length; i++) beta_Y[i] = new DenseDoubleMatrix1D(numY);

          scale = new double[2 * dataSeq.length()];
        }
        // compute beta values in a backward scan.
        // also scale beta-values to 1 to avoid numerical problems.
        scale[dataSeq.length() - 1] = (doScaling) ? numY : 1;
        beta_Y[dataSeq.length() - 1].assign(1.0 / scale[dataSeq.length() - 1]);
        for (int i = dataSeq.length() - 1; i > 0; i--) {
          if (params.debugLvl > 2) {
            Util.printDbg("Features fired");
            // featureGenerator.startScanFeaturesAt(dataSeq, i);
            // while (featureGenerator.hasNext()) {
            // Feature feature = featureGenerator.next();
            // Util.printDbg(feature.toString());
            // }
          }

          // compute the Mi matrix
          initMDone =
              computeLogMi(
                  featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, true, reuseM, initMDone);
          tmp_Y.assign(beta_Y[i]);
          tmp_Y.assign(Ri_Y, multFunc);
          RobustMath.Mult(Mi_YY, tmp_Y, beta_Y[i - 1], 1, 0, false, edgeGen);
          //		Mi_YY.zMult(tmp_Y, beta_Y[i-1]);

          // need to scale the beta-s to avoid overflow
          scale[i - 1] = doScaling ? beta_Y[i - 1].zSum() : 1;
          if ((scale[i - 1] < 1) && (scale[i - 1] > -1)) scale[i - 1] = 1;
          constMultiplier.multiplicator = 1.0 / scale[i - 1];
          beta_Y[i - 1].assign(constMultiplier);
        }

        double thisSeqLogli = 0;
        for (int i = 0; i < dataSeq.length(); i++) {
          // compute the Mi matrix
          initMDone =
              computeLogMi(
                  featureGenerator, lambda, dataSeq, i, Mi_YY, Ri_Y, true, reuseM, initMDone);
          // find features that fire at this position..
          featureGenerator.startScanFeaturesAt(dataSeq, i);

          if (i > 0) {
            tmp_Y.assign(alpha_Y);
            RobustMath.Mult(Mi_YY, tmp_Y, newAlpha_Y, 1, 0, true, edgeGen);
            //		Mi_YY.zMult(tmp_Y, newAlpha_Y,1,0,true);
            newAlpha_Y.assign(Ri_Y, multFunc);
          } else {
            newAlpha_Y.assign(Ri_Y);
          }
          while (featureGenerator.hasNext()) {
            Feature feature = featureGenerator.next();
            int f = feature.index();

            int yp = feature.y();
            int yprev = feature.yprev();
            float val = feature.value();
            if ((dataSeq.y(i) == yp)
                && (((i - 1 >= 0) && (yprev == dataSeq.y(i - 1))) || (yprev < 0))) {
              grad[f] += val;
              thisSeqLogli += val * lambda[f];
            }
            if (yprev < 0) {
              ExpF[f] += newAlpha_Y.get(yp) * val * beta_Y[i].get(yp);
            } else {
              ExpF[f] +=
                  alpha_Y.get(yprev)
                      * Ri_Y.get(yp)
                      * Mi_YY.get(yprev, yp)
                      * val
                      * beta_Y[i].get(yp);
            }
          }

          alpha_Y.assign(newAlpha_Y);
          // now scale the alpha-s to avoid overflow problems.
          constMultiplier.multiplicator = 1.0 / scale[i];
          alpha_Y.assign(constMultiplier);

          if (params.debugLvl > 2) {
            System.out.println("Alpha-i " + alpha_Y.toString());
            System.out.println("Ri " + Ri_Y.toString());
            System.out.println("Mi " + Mi_YY.toString());
            System.out.println("Beta-i " + beta_Y[i].toString());
          }
        }
        double Zx = alpha_Y.zSum();
        thisSeqLogli -= log(Zx);
        // correct for the fact that alpha-s were scaled.
        for (int i = 0; i < dataSeq.length(); i++) {
          thisSeqLogli -= log(scale[i]);
        }

        logli += thisSeqLogli;
        // update grad.
        for (int f = 0; f < grad.length; f++) grad[f] -= ExpF[f] / Zx;

        if (params.debugLvl > 1) {
          System.out.println(
              "Sequence "
                  + thisSeqLogli
                  + " logli "
                  + logli
                  + " log(Zx) "
                  + Math.log(Zx)
                  + " Zx "
                  + Zx);
        }
      }
      if (params.debugLvl > 2) {
        for (int f = 0; f < lambda.length; f++) System.out.print(lambda[f] + " ");
        System.out.println(" :x");
        for (int f = 0; f < lambda.length; f++)
          System.out.println(featureGenerator.featureName(f) + " " + grad[f] + " ");
        System.out.println(" :g");
      }

      if (params.debugLvl > 0)
        Util.printDbg(
            "Iter "
                + icall
                + " log likelihood "
                + logli
                + " norm(grad logli) "
                + norm(grad)
                + " norm(x) "
                + norm(lambda));
      if (icall == 0) {
        System.out.println("Number of training records" + numRecord);
      }
    } catch (Exception e) {
      System.out.println("Alpha-i " + alpha_Y.toString());
      System.out.println("Ri " + Ri_Y.toString());
      System.out.println("Mi " + Mi_YY.toString());

      e.printStackTrace();
      System.exit(0);
    }
    return logli;
  }