Example #1
  public static double dnbinom_mu(double x, double size, double mu, boolean give_log) {
    /* originally, just set  prob :=  size / (size + mu)  and called dbinom_raw(),
     * but that suffers from cancellation when   mu << size  */
    double ans, p;

    if (DoubleVector.isNaN(x) || DoubleVector.isNaN(size) || DoubleVector.isNaN(mu)) {
      return x + size + mu;
    }

    if (mu < 0 || size < 0) {
      return DoubleVector.NaN;
    }

    // R_D_nonint_check(x);

    if (SignRank.R_D_nonint(x, true, give_log)) {
      // MATHLIB_WARNING("non-integer x = %f", x);
      // TODO: decide how to surface this warning from Java
      return SignRank.R_D__0(true, give_log);
    }

    if (x < 0 || !DoubleVector.isFinite(x)) {
      return SignRank.R_D__0(true, give_log);
    }
    x = SignRank.R_D_forceint(x);

    if (x == 0) /* be accurate, both for n << mu and n >> mu */ {
      return SignRank.R_D_exp(
          size * (size < mu ? Math.log(size / (size + mu)) : Math.log1p(-mu / (size + mu))),
          true,
          give_log);
    }
    if (x < 1e-10 * size) {
      /* don't use dbinom_raw() but MM's formula: */
      /* FIXME --- 1e-8 shows problem; rather use algdiv() from ./toms708.c */
      return SignRank.R_D_exp(
          x * Math.log(size * mu / (size + mu))
              - mu
              - org.apache.commons.math.special.Gamma.logGamma(x + 1)
              + Math.log1p(x * (x - 1) / (2 * size)),
          true,
          give_log);
    }
    /* else: no unnecessary cancellation inside dbinom_raw, when
     * x_ = size and n_ = x+size are so close that n_ - x_ loses accuracy
     */
    ans = dbinom_raw(size, x + size, size / (size + mu), mu / (size + mu), give_log);
    p = ((double) size) / (size + x);
    return ((give_log) ? Math.log(p) + ans : p * ans);
  }
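
For orientation, the following standalone sketch (added here, not part of the original example) evaluates the same negative-binomial pmf in the mean parameterization directly through logGamma, assuming org.apache.commons.math 2.x on the classpath; the class and the nbPmfMu helper are hypothetical names, and the direct formula is only a sanity check for benign arguments, not the cancellation-safe path used above.

// Sketch under stated assumptions (org.apache.commons.math 2.x available).
import org.apache.commons.math.special.Gamma;

public class DnbinomMuSketch {
  /** Direct pmf: P(X = x) = Gamma(x+size) / (Gamma(size) * x!) * p^size * (1-p)^x, p = size/(size+mu). */
  static double nbPmfMu(double x, double size, double mu) {
    double p = size / (size + mu);
    double logPmf = Gamma.logGamma(x + size) - Gamma.logGamma(size) - Gamma.logGamma(x + 1.0)
        + size * Math.log(p) + x * Math.log1p(-p);
    return Math.exp(logPmf);
  }

  public static void main(String[] args) {
    // Should agree with dnbinom_mu(3, 10, 2.5, false) up to rounding for arguments
    // where no cancellation occurs.
    System.out.println(nbPmfMu(3.0, 10.0, 2.5));
  }
}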
Example #2
  /**
   * Computes the Dirichlet expectation E[log theta_kw] = psi(alpha_kw) - psi(sum_w alpha_kw)
   * for each row of the parameter matrix, where psi is the digamma function (the first
   * derivative of the logarithm of the gamma function).
   *
   * @param array variational Dirichlet parameters, one row per component
   * @return a matrix of the same shape containing the expectations of log theta
   */
  static double[][] dirichletExpectation(double[][] array) {
    int numRows = array.length;
    int numCols = array[0].length;

    double[] vector = new double[numRows];
    Arrays.fill(vector, 0.0);

    for (int k = 0; k < numRows; ++k) {
      for (int w = 0; w < numCols; ++w) {
        try {
          vector[k] += array[k][w];
        } catch (Exception e) {
          throw new RuntimeException(e);
        }
      }
    }
    for (int k = 0; k < numRows; ++k) {
      vector[k] = Gamma.digamma(vector[k]);
    }

    double[][] approx = new double[numRows][];
    for (int k = 0; k < numRows; ++k) {
      approx[k] = new double[numCols];
      for (int w = 0; w < numCols; ++w) {
        double z = Gamma.digamma(array[k][w]);
        approx[k][w] = z - vector[k];
      }
    }
    return approx;
  }
Example #3
 static double[] dirichletExpectation(double[] array) {
   double sum = 0;
   for (double d : array) {
     sum += d;
   }
   double d = Gamma.digamma(sum);
   double[] result = new double[array.length];
   for (int i = 0; i < array.length; ++i) {
     result[i] = Gamma.digamma(array[i]) - d;
   }
   return result;
 }
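
A brief numeric sketch of what the two dirichletExpectation helpers above return (an illustration added here, not from the original source, assuming org.apache.commons.math 2.x for Gamma.digamma; class and variable names are made up): each entry of the result is E[log theta_i] = psi(alpha_i) - psi(sum(alpha)), and the matrix variant applies the same expectation to every row independently.

// Usage sketch (assumption: org.apache.commons.math 2.x provides Gamma.digamma).
import org.apache.commons.math.special.Gamma;

public class DirichletExpectationDemo {
  public static void main(String[] args) {
    double[] alpha = {0.5, 1.0, 2.0};               // one Dirichlet parameter vector
    double psiSum = Gamma.digamma(0.5 + 1.0 + 2.0); // digamma of the concentration total
    for (double a : alpha) {
      // Each printed value is psi(alpha_i) - psi(sum(alpha)) = E[log theta_i] under
      // Dirichlet(alpha), which is exactly what dirichletExpectation(alpha) returns entry-wise.
      System.out.println(Gamma.digamma(a) - psiSum);
    }
  }
}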
 /**
  * Return the probability density for a particular point.
  *
  * @param x The point at which the density should be computed.
  * @return The pdf at point x.
  */
 public double density(Double x) {
   if (x < 0) return 0;
   return Math.pow(x / getBeta(), getAlpha() - 1)
       / getBeta()
       * Math.exp(-x / getBeta())
       / Math.exp(Gamma.logGamma(getAlpha()));
 }
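
The method above is the Gamma(alpha, beta) density f(x) = x^(alpha-1) * exp(-x/beta) / (beta^alpha * Gamma(alpha)), written so that Gamma(alpha) comes from logGamma. A minimal standalone sketch of that closed form, added here for comparison (assumes org.apache.commons.math 2.x; class and helper names are hypothetical):

// Sketch (assumption: org.apache.commons.math 2.x).
import org.apache.commons.math.special.Gamma;

public class GammaPdfSketch {
  static double gammaPdf(double x, double alpha, double beta) {
    if (x < 0) {
      return 0.0;
    }
    // x^(alpha-1) * e^(-x/beta) / (beta^alpha * Gamma(alpha)); Gamma(alpha) is
    // recovered from logGamma, as in the examples above.
    return Math.pow(x, alpha - 1.0) * Math.exp(-x / beta)
        / (Math.pow(beta, alpha) * Math.exp(Gamma.logGamma(alpha)));
  }

  public static void main(String[] args) {
    // For alpha = 2, beta = 3, x = 4 this should match new GammaDistribution(2.0, 3.0).density(4.0).
    System.out.println(gammaPdf(4.0, 2.0, 3.0));
  }
}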
  /**
   * Constructor with shape and scale parameters
   *
   * @param alpha shape parameter
   * @param beta scale parameter
   */
  public GammaDistribution(double alpha, double beta) {
    if (alpha <= 0.0 || beta <= 0.0) {
      throw new IllegalArgumentException("alpha and beta parameters must be positive");
    }

    this.alpha = alpha;
    this.beta = beta;
    logGammaAlpha = Gamma.logGamma(alpha);
    gammaDist = new GammaDistributionImpl(alpha, beta);
  }
  /**
   * For this distribution, X, this method returns P(X &lt; x).
   *
   * <p>The implementation of this method is based on:
   *
   * <ul>
   *   <li><a href="http://mathworld.wolfram.com/Chi-SquaredDistribution.html">Chi-Squared
   *       Distribution</a>, equation (9).
   *   <li>Casella, G., & Berger, R. (1990). <i>Statistical Inference</i>. Belmont, CA: Duxbury
   *       Press.
   * </ul>
   *
   * @param x the value at which the CDF is evaluated.
   * @return CDF for this distribution.
   * @throws MathException if the cumulative probability can not be computed due to convergence or
   *     other numerical errors.
   */
  public double cumulativeProbability(double x) throws MathException {
    double ret;

    if (x <= 0.0) {
      ret = 0.0;
    } else {
      ret = Gamma.regularizedGammaP(getAlpha(), x / getBeta());
    }

    return ret;
  }
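
The CDF above is the regularized lower incomplete gamma function P(alpha, x/beta). Because a chi-squared distribution with k degrees of freedom is Gamma(alpha = k/2, beta = 2), which is the connection the Javadoc cites, the same call also gives chi-squared probabilities. A minimal sketch, assuming org.apache.commons.math 2.x (class and helper names are hypothetical):

// Sketch (assumption: org.apache.commons.math 2.x).
import org.apache.commons.math.MathException;
import org.apache.commons.math.special.Gamma;

public class GammaCdfSketch {
  /** P(X <= x) for Gamma(alpha, beta): the regularized incomplete gamma P(alpha, x / beta). */
  static double gammaCdf(double x, double alpha, double beta) throws MathException {
    return x <= 0.0 ? 0.0 : Gamma.regularizedGammaP(alpha, x / beta);
  }

  public static void main(String[] args) throws MathException {
    // A chi-squared distribution with k degrees of freedom is Gamma(alpha = k/2, beta = 2),
    // so its CDF can be checked through the same call; chi-squared(3) at 7.81 is about 0.95.
    double k = 3.0;
    System.out.println(gammaCdf(7.81, k / 2.0, 2.0));
  }
}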
Example #7
  public static Matrix gammaLn(Matrix m) {

    int numRows = m.data.length;
    int numCols = m.data[0].length;
    double[][] result = new double[numRows][];
    for (int k = 0; k < numRows; ++k) {
      result[k] = new double[numCols];
      for (int w = 0; w < numCols; ++w) {
        result[k][w] = Gamma.logGamma(m.data[k][w]);
      }
    }
    return new Matrix(result);
  }
Example #8
 public static double gammaLn(double d) {
   return Gamma.logGamma(d);
 }
 /** Recompute the normalization factor. */
 private void recomputeZ() {
   if (Double.isNaN(z)) {
     z = Gamma.logGamma(alpha) + Gamma.logGamma(beta) - Gamma.logGamma(alpha + beta);
   }
 }
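
recomputeZ caches z = log B(alpha, beta) = logGamma(alpha) + logGamma(beta) - logGamma(alpha + beta), the log of the beta function that normalizes the Beta(alpha, beta) density. A minimal sketch of how such a cached normalizer is typically used, added here for illustration (assumes org.apache.commons.math 2.x; the class and logBetaPdf helper are hypothetical):

// Sketch (assumption: org.apache.commons.math 2.x).
import org.apache.commons.math.special.Gamma;

public class BetaNormalizationSketch {
  /** log of the Beta(alpha, beta) density at x in (0, 1). */
  static double logBetaPdf(double x, double alpha, double beta) {
    // z = log Gamma(alpha) + log Gamma(beta) - log Gamma(alpha + beta), as cached by recomputeZ().
    double z = Gamma.logGamma(alpha) + Gamma.logGamma(beta) - Gamma.logGamma(alpha + beta);
    return (alpha - 1.0) * Math.log(x) + (beta - 1.0) * Math.log1p(-x) - z;
  }

  public static void main(String[] args) {
    // The Beta(2, 2) density at 0.5 is 1.5, so this prints a value very close to 1.5.
    System.out.println(Math.exp(logBetaPdf(0.5, 2.0, 2.0)));
  }
}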
Example #10
  public static double pnbeta_raw(double x, double o_x, double a, double b, double ncp) {
    /* o_x  == 1 - x  but maybe more accurate */

    /* change errmax and itrmax if desired;
     * original (AS 226, R84) had  (errmax; itrmax) = (1e-6; 100) */
    final double errmax = 1.0e-9;
    /* 100 is not enough for pf(ncp=200); see PR#11277 */
    final int itrmax = 10000;

    double[] temp = new double[1];
    double[] tmp_c = new double[1];
    int[] ierr = new int[1];
    double a0, ax, lbeta, c, errbd, x0;
    int j;

    double ans, gx, q, sumq;

    if (ncp < 0. || a <= 0. || b <= 0.) {
      return DoubleVector.NaN;
    }

    if (x < 0. || o_x > 1. || (x == 0. && o_x == 1.)) {
      return 0.;
    }
    if (x > 1. || o_x < 0. || (x == 1. && o_x == 0.)) {
      return 1.;
    }

    c = ncp / 2.;

    /* initialize the series */

    x0 = Math.floor(Math.max(c - 7. * Math.sqrt(c), 0.));
    a0 = a + x0;
    lbeta =
        org.apache.commons.math.special.Gamma.logGamma(a0)
            + org.apache.commons.math.special.Gamma.logGamma(b)
            - org.apache.commons.math.special.Gamma.logGamma(a0 + b);
    /* temp = pbeta_raw(x, a0, b, TRUE, FALSE), but using (x, o_x): */
    Utils.bratio(a0, b, x, o_x, temp, tmp_c, ierr, false);

    gx =
        Math.exp(
            a0 * Math.log(x)
                + b * (x < .5 ? Math.log1p(-x) : Math.log(o_x))
                - lbeta
                - Math.log(a0));
    if (a0 > a) {
      q = Math.exp(-c + x0 * Math.log(c) - org.apache.commons.math.special.Gamma.logGamma(x0 + 1.));
    } else {
      q = Math.exp(-c);
    }

    sumq = 1. - q;
    ans = ax = q * temp[0];

    /* recurse over subsequent terms until convergence is achieved */
    j = (int) x0;
    do {
      j++;
      temp[0] -= gx;
      gx *= x * (a + b + j - 1.) / (a + j);
      q *= c / j;
      sumq -= q;
      ax = temp[0] * q;
      ans += ax;
      errbd = (temp[0] - gx) * sumq;
    } while (errbd > errmax && j < itrmax + x0);

    if (errbd > errmax) {
      // requested precision not reached; R signals ML_ERROR(ME_PRECISION, "pnbeta") here
    }
    if (j >= itrmax + x0) {
      // series did not converge within itrmax terms; R signals ML_ERROR(ME_NOCONV, "pnbeta") here
    }

    return ans;
  }
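
pnbeta_raw evaluates the standard Poisson-mixture series for the noncentral beta CDF, F(x; a, b, ncp) = sum over j >= 0 of e^(-c) * c^j / j! * I_x(a + j, b) with c = ncp/2, where I_x is the regularized incomplete beta function; the code starts the sum near the Poisson mode x0 and updates the beta and Poisson terms by recurrence. Below is a brute-force sketch of the same series for cross-checking at moderate ncp > 0 (added here, not from the original source; it assumes org.apache.commons.math 2.x, the class and helper names are hypothetical, and the 200-term truncation is arbitrary):

// Sketch (assumption: org.apache.commons.math 2.x). Naive truncated series with no recurrences,
// usable only for moderate ncp; the production code above is far more careful about convergence.
import org.apache.commons.math.MathException;
import org.apache.commons.math.special.Beta;
import org.apache.commons.math.special.Gamma;

public class PnbetaSketch {
  static double pnbetaNaive(double x, double a, double b, double ncp) throws MathException {
    double c = ncp / 2.0;
    double sum = 0.0;
    for (int j = 0; j < 200; j++) {
      // Poisson(c) weight e^{-c} c^j / j!, computed in log space for stability.
      double logWeight = -c + j * Math.log(c) - Gamma.logGamma(j + 1.0);
      sum += Math.exp(logWeight) * Beta.regularizedBeta(x, a + j, b);
    }
    return sum;
  }

  public static void main(String[] args) throws MathException {
    // Should lie close to pnbeta_raw(0.5, 0.5, 2.0, 3.0, 1.0) for this small noncentrality.
    System.out.println(pnbetaNaive(0.5, 2.0, 3.0, 1.0));
  }
}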