public static double qnbeta( double p, double a, double b, double ncp, boolean lower_tail, boolean log_p) { final double accu = 1e-15; final double Eps = 1e-14; /* must be > accu */ double ux, lx, nx, pp; if (DoubleVector.isNaN(p) || DoubleVector.isNaN(a) || DoubleVector.isNaN(b) || DoubleVector.isNaN(ncp)) { return p + a + b + ncp; } if (!DoubleVector.isFinite(a)) { return DoubleVector.NaN; } if (ncp < 0. || a <= 0. || b <= 0.) { return DoubleVector.NaN; } // R_Q_P01_boundaries(p, 0, 1); if ((log_p && p > 0) || (!log_p && (p < 0 || p > 1))) { return DoubleVector.NaN; } if (p == SignRank.R_DT_0(lower_tail, log_p)) { return 0.0; } if (p == SignRank.R_DT_1(lower_tail, log_p)) { return 1.0; } // end of R_Q_P01_boundaries p = Normal.R_DT_qIv(p, log_p ? 1.0 : 0.0, lower_tail ? 1.0 : 0.0); /* Invert pnbeta(.) : * 1. finding an upper and lower bound */ if (p > 1 - SignRank.DBL_EPSILON) { return 1.0; } pp = Math.min(1 - SignRank.DBL_EPSILON, p * (1 + Eps)); for (ux = 0.5; ux < 1 - SignRank.DBL_EPSILON && pnbeta(ux, a, b, ncp, true, false) < pp; ux = 0.5 * (1 + ux)) ; pp = p * (1 - Eps); for (lx = 0.5; lx > Double.MIN_VALUE && pnbeta(lx, a, b, ncp, true, false) > pp; lx *= 0.5) ; /* 2. interval (lx,ux) halving : */ do { nx = 0.5 * (lx + ux); if (pnbeta(nx, a, b, ncp, true, false) > p) { ux = nx; } else { lx = nx; } } while ((ux - lx) / nx > accu); return 0.5 * (ux + lx); }
@Primitive public static double pnbeta( double x, double a, double b, double ncp, boolean lower_tail, boolean log_p) { if (DoubleVector.isNaN(x) || DoubleVector.isNaN(a) || DoubleVector.isNaN(b) || DoubleVector.isNaN(ncp)) { return x + a + b + ncp; } // R_P_bounds_01(x, 0., 1.); if (x <= 0.0) { return SignRank.R_DT_0(lower_tail, log_p); } if (x >= 1.0) { return SignRank.R_DT_1(lower_tail, log_p); } return pnbeta2(x, 1 - x, a, b, ncp, lower_tail, log_p); }
/**
 * Density of the noncentral beta distribution.
 *
 * <p>Evaluates the Poisson mixture sum_{k>=0} dpois(k, ncp/2) * dbeta(x, a+k, b),
 * starting at the (approximately) largest term kMax and summing outwards in both
 * directions so the series can be truncated once terms are negligible relative
 * to the running sum. The middle term is rescaled to 1 to avoid underflow; the
 * true magnitude is restored on the log scale at the end via {@code p_k}.
 *
 * @param x        point at which to evaluate the density
 * @param a        first shape parameter, must be &gt; 0 and finite
 * @param b        second shape parameter, must be &gt; 0 and finite
 * @param ncp      noncentrality parameter, must be &gt;= 0 and finite
 * @param give_log if true, return the log density
 * @return the (log) density, or NaN for invalid parameters
 */
@Primitive
public static double dnbeta(double x, double a, double b, double ncp, boolean give_log) {
  final double eps = 1.e-15; // relative truncation tolerance for the series
  int kMax;
  double k, ncp2, dx2, d, D;
  double sum, term, p_k, q; /* They were LDOUBLE */

  // NaN in any argument propagates NaN.
  if (DoubleVector.isNaN(x) || DoubleVector.isNaN(a) || DoubleVector.isNaN(b) || DoubleVector.isNaN(ncp)) {
    return x + a + b + ncp;
  }
  if (ncp < 0 || a <= 0 || b <= 0) {
    return DoubleVector.NaN;
  }
  if (!DoubleVector.isFinite(a) || !DoubleVector.isFinite(b) || !DoubleVector.isFinite(ncp)) {
    return DoubleVector.NaN;
  }
  // Outside the support the density is 0 (or -Inf on the log scale).
  if (x < 0 || x > 1) {
    return (SignRank.R_D__0(true, give_log));
  }
  // ncp == 0 reduces to the central beta density.
  if (ncp == 0) {
    return Distributions.dbeta(x, a, b, give_log);
  }

  /* New algorithm, starting with *largest* term : */
  // Locate kMax, the index of the largest term, from the roots of the
  // quadratic obtained by setting the term ratio to 1.
  ncp2 = 0.5 * ncp;
  dx2 = ncp2 * x;
  d = (dx2 - a - 1) / 2;
  D = d * d + dx2 * (a + b) - a;
  if (D <= 0) {
    kMax = 0;
  } else {
    D = Math.ceil(d + Math.sqrt(D));
    kMax = (D > 0) ? (int) D : 0;
  }

  /* The starting "middle term" --- first look at its log scale: */
  term = Distributions.dbeta(x, a + kMax, b, /* log = */ true);
  p_k = Poisson.dpois_raw(kMax, ncp2, true);
  if (x == 0. || !DoubleVector.isFinite(term) || !DoubleVector.isFinite(p_k)) /* if term = +Inf */ {
    // Degenerate middle term: answer is just the middle term itself.
    return SignRank.R_D_exp(p_k + term, true, give_log);
  }

  /* Now if s_k := p_k * t_k {here = exp(p_k + term)} would underflow,
   * we should rather scale everything and re-scale at the end:*/
  p_k += term; /* = log(p_k) + log(t_k) == log(s_k) -- used at end to rescale */
  /* mid = 1 = the rescaled value, instead of mid = exp(p_k); */

  /* Now sum from the inside out */
  sum = term = 1. /* = mid term */;
  /* middle to the left */
  k = kMax;
  while (k > 0 && term > sum * eps) {
    k--;
    q = /* 1 / r_k = */ (k + 1) * (k + a) / (k + a + b) / dx2;
    term *= q;
    sum += term;
  }
  /* middle to the right */
  term = 1.;
  k = kMax;
  do {
    q = /* r_{old k} = */ dx2 * (k + a + b) / (k + a) / (k + 1);
    k++;
    term *= q;
    sum += term;
  } while (term > sum * eps);

  // Undo the rescaling: log(density) = log(s_kMax) + log(sum of scaled terms).
  return SignRank.R_D_exp(p_k + Math.log(sum), true, give_log);
}