Esempio n. 1
0
  /**
   * Returns a random variable whose realizations are the pathwise maximum of this random
   * variable and the given one, i.e. this variable floored at the given level.
   *
   * @param randomVariable the floor level (deterministic or stochastic)
   * @return a new RandomVariable representing max(this, randomVariable) path by path
   * @see net.finmath.stochastic.RandomVariableInterface#floor(net.finmath.stochastic.RandomVariableInterface)
   */
  public RandomVariableInterface floor(RandomVariableInterface randomVariable) {
    // Set time of this random variable to maximum of time with respect to which measurability is
    // known.
    double newTime = Math.max(time, randomVariable.getFiltrationTime());

    if (isDeterministic() && randomVariable.isDeterministic()) {
      // both constant: the result is a deterministic constant as well
      double newValueIfNonStochastic = FastMath.max(valueIfNonStochastic, randomVariable.get(0));
      return new RandomVariable(newTime, newValueIfNonStochastic);
    } else if (isDeterministic()) return randomVariable.floor(this); // floor is symmetric (max), so delegate
    else {
      // this is stochastic here; the argument may be deterministic or stochastic.
      // NOTE(review): realizations[i] assumes size() >= randomVariable.size(); if the
      // argument had more realizations this would throw ArrayIndexOutOfBoundsException.
      // Presumably both operands always share the same Monte-Carlo dimension — confirm.
      double[] newRealizations = new double[Math.max(size(), randomVariable.size())];
      for (int i = 0; i < newRealizations.length; i++)
        newRealizations[i] = FastMath.max(realizations[i], randomVariable.get(i));
      return new RandomVariable(newTime, newRealizations);
    }
  }
Esempio n. 2
0
    /**
     * Generate the Fourier coefficients used by the tesseral short periodic terms.
     *
     * <p>Nothing is computed unless there is at least one non-resonant tesseral term,
     * or only m-daily terms are requested.
     *
     * @param date the current date
     * @throws OrekitException if an error occurs while generating the coefficients
     */
    public void generateCoefficients(final AbsoluteDate date) throws OrekitException {
      // Compute only if there is at least one non-resonant tesseral
      if (!nonResOrders.isEmpty() || mDailiesOnly) {
        // Gmsj and Hmsj polynomials
        ghMSJ = new GHmsjPolynomials(k, h, alpha, beta, I);

        // GAMMAmns function
        gammaMNS = new GammaMnsFunction(fact, gamma, I);

        final int maxRoaPower = FastMath.max(maxDegreeTesseralSP, maxDegreeMdailyTesseralSP);

        // R / a up to power degree
        // NOTE(review): the recursion reads roaPow[0]; assumes roaPow[0] was
        // initialized to 1 elsewhere before this call — confirm.
        for (int i = 1; i <= maxRoaPower; i++) {
          roaPow[i] = roa * roaPow[i - 1];
        }

        // generate the m-daily coefficients (the j = 0 terms)
        for (int m = 1; m <= maxOrderMdailyTesseralSP; m++) {
          buildFourierCoefficients(date, m, 0, maxDegreeMdailyTesseralSP);
        }

        // generate the other coefficients only if required
        if (!mDailiesOnly) {
          for (int m : nonResOrders.keySet()) {
            final List<Integer> listJ = nonResOrders.get(m);

            for (int j : listJ) {

              buildFourierCoefficients(date, m, j, maxDegreeTesseralSP);
            }
          }
        }
      }
    }
Esempio n. 3
0
  /**
   * Get the lightning ratio ([0-1]).
   *
   * <p>The ratio is 0 when the satellite is in complete umbra, 1 when it is fully lit,
   * and in between when it is in penumbra (partial occultation of the Sun disk).
   *
   * @param position the satellite's position in the selected frame.
   * @param frame in which is defined the position
   * @param date the date
   * @return lightning ratio
   * @exception OrekitException if the trajectory is inside the Earth
   */
  public double getLightningRatio(
      final Vector3D position, final Frame frame, final AbsoluteDate date) throws OrekitException {

    // Useful angles: [0] Sat-Sun / Sat-central-body angle,
    // [1] central body apparent radius, [2] Sun apparent radius
    final double[] angles = getEclipseAngles(position, frame, date);
    final double sunEarthAngle = angles[0];
    final double alphaCentral = angles[1];
    final double alphaSun = angles[2];

    if (sunEarthAngle - alphaCentral + alphaSun <= 0.0) {
      // the Sun disk is completely hidden by the central body: total umbra
      return 0.0;
    }

    if (sunEarthAngle - alphaCentral - alphaSun >= 0.0) {
      // the two disks do not overlap at all: full lighting
      return 1.0;
    }

    // Penumbra: compute the lightning ratio from the overlapping disk geometry
    final double sEA2 = sunEarthAngle * sunEarthAngle;
    final double halfInvSEA = 1.0 / (2. * sunEarthAngle);
    final double aS2 = alphaSun * alphaSun;
    final double aE2 = alphaCentral * alphaCentral;
    final double aE2maS2 = aE2 - aS2;

    final double alpha1 = (sEA2 - aE2maS2) * halfInvSEA;
    final double alpha2 = (sEA2 + aE2maS2) * halfInvSEA;

    // Protection against numerical inaccuracy at boundaries
    final double cos1 = FastMath.min(1.0, FastMath.max(-1.0, alpha1 / alphaSun));
    final double sin1Sq = FastMath.max(0.0, aS2 - alpha1 * alpha1);
    final double cos2 = FastMath.min(1.0, FastMath.max(-1.0, alpha2 / alphaCentral));
    final double sin2Sq = FastMath.max(0.0, aE2 - alpha2 * alpha2);

    // circular-segment areas on the Sun disk and on the central body disk
    final double segSun = aS2 * FastMath.acos(cos1) - alpha1 * FastMath.sqrt(sin1Sq);
    final double segBody = aE2 * FastMath.acos(cos2) - alpha2 * FastMath.sqrt(sin2Sq);

    // remaining visible fraction of the Sun disk (area normalized by pi * alphaSun^2)
    return 1. - (segSun + segBody) / (FastMath.PI * aS2);
  }
Esempio n. 4
0
  /**
   * Create the objects needed for linear transformation.
   *
   * <p>Each {@link org.orekit.propagation.semianalytical.dsst.utilities.hansen.HansenTesseralLinear
   * HansenTesseralLinear} uses a fixed value for s and j. Since j varies from -maxJ to +maxJ and s
   * varies from -maxDegree to +maxDegree, a 2 * maxDegree + 1 x 2 * maxJ + 1 matrix of objects
   * should be created. The size of this matrix can be reduced by taking into account the expression
   * (2.7.3-2). This means that it is enough to create the objects for positive values of j and all
   * values of s.
   *
   * @param meanOnly create only the objects required for the mean contribution
   */
  private void createHansenObjects(final boolean meanOnly) {
    // Allocate the two dimensional array (rows indexed by s + maxDegree, columns by |j|)
    this.hansenObjects = new HansenTesseralLinear[2 * maxDegree + 1][jMax + 1];

    if (meanOnly) {
      // mean contribution only: restrict creation to the resonant orders
      // loop through the resonant orders
      for (int m : resOrders) {
        // Compute the corresponding j term
        final int j = FastMath.max(1, (int) FastMath.round(ratio * m));

        // Compute the sMin and sMax values
        final int sMin = FastMath.min(maxEccPow - j, maxDegree);
        final int sMax = FastMath.min(maxEccPow + j, maxDegree);

        // loop through the s values
        for (int s = 0; s <= sMax; s++) {
          // Compute the n0 value
          final int n0 = FastMath.max(FastMath.max(2, m), s);

          // Create the object for the pair j,s
          this.hansenObjects[s + maxDegree][j] =
              new HansenTesseralLinear(maxDegree, s, j, n0, maxHansen);

          if (s > 0 && s <= sMin) {
            // Also create the object for the pair j, -s
            this.hansenObjects[maxDegree - s][j] =
                new HansenTesseralLinear(maxDegree, -s, j, n0, maxHansen);
          }
        }
      }
    } else {
      // create all objects
      for (int j = 0; j <= jMax; j++) {
        for (int s = -maxDegree; s <= maxDegree; s++) {
          // Compute the n0 value
          final int n0 = FastMath.max(2, FastMath.abs(s));

          this.hansenObjects[s + maxDegree][j] =
              new HansenTesseralLinear(maxDegree, s, j, n0, maxHansen);
        }
      }
    }
  }
Esempio n. 5
0
  /** {@inheritDoc} */
  @Override
  public void initialize(final AuxiliaryElements aux, final boolean meanOnly)
      throws OrekitException {

    // Keplerian period of the current orbit
    orbitPeriod = aux.getKeplerianPeriod();

    // frame in which the orbit is defined
    frame = aux.getFrame();

    // Select the highest power of the eccentricity used in the analytical power
    // series expansion for the averaged high order resonant central body
    // spherical harmonic perturbation, from the current eccentricity.
    final double e = aux.getEcc();
    final double[] eccThresholds = {0.005, 0.02, 0.1, 0.2, 0.3, 0.4};
    final int[] eccPowers = {3, 4, 7, 10, 12, 15};
    maxEccPow = 20; // default, used when e > 0.4
    for (int i = 0; i < eccThresholds.length; i++) {
      if (e <= eccThresholds[i]) {
        maxEccPow = eccPowers[i];
        break;
      }
    }

    // Maximum power of the eccentricity to use in Hansen coefficient Kernel expansion.
    maxHansen = maxEccPow / 2;
    jMax = FastMath.min(MAXJ, maxDegree + maxEccPow);

    // Ratio of satellite to central body periods, used to define resonant terms
    ratio = orbitPeriod / bodyPeriod;

    // Compute the resonant tesseral harmonic terms if not set by the user
    getResonantAndNonResonantTerms(meanOnly);

    // Initialize the HansenTesseralLinear objects needed
    createHansenObjects(meanOnly);

    if (!meanOnly) {
      // Initialize the tesseral short periodics coefficient class
      tesseralSPCoefs =
          new TesseralShortPeriodicCoefficients(
              jMax,
              FastMath.max(maxOrderTesseralSP, maxOrderMdailyTesseralSP),
              INTERPOLATION_POINTS);
    }
  }
  /**
   * Check if the optimization algorithm has converged considering the last two points. This method
   * may be called several times from the same algorithm iteration with different points. This can
   * be detected by checking the iteration number at each call if needed. Each time this method is
   * called, the previous and current point correspond to points with the same role at each
   * iteration, so they can be compared. As an example, simplex-based algorithms call this method
   * for all points of the simplex, not only for the best or worst ones.
   *
   * @param iteration Index of current iteration
   * @param previous Best point in the previous iteration.
   * @param current Best point in the current iteration.
   * @return {@code true} if the algorithm has converged.
   */
  @Override
  public boolean converged(
      final int iteration, final PointValuePair previous, final PointValuePair current) {
    // Convergence is forced once the configured iteration budget is exhausted.
    if (maxIterationCount != ITERATION_CHECK_DISABLED && iteration >= maxIterationCount) {
      return true;
    }

    // Converged when the change between the two values is small relative to their
    // magnitude, or small in absolute terms.
    final double prevValue = previous.getValue();
    final double currValue = current.getValue();
    final double delta = FastMath.abs(prevValue - currValue);
    final double magnitude = FastMath.max(FastMath.abs(prevValue), FastMath.abs(currValue));
    if (delta <= magnitude * getRelativeThreshold()) {
      return true;
    }
    return delta <= getAbsoluteThreshold();
  }
Esempio n. 7
0
 /**
  * Compute the loyalty of a city's population for a given tax rate, clamped to [0, 100].
  *
  * <p>Loyalty starts at 100 and drops by {@code C.TAX_LOYALTY_HIT} for every point the tax
  * rate exceeds the default tax rate (rising symmetrically below it, up to the cap). While
  * the player is at war with the Church, an additional excommunication penalty is applied.
  *
  * @param tax_rate current tax rate
  * @param efs_ini game settings providing the default tax rate
  * @param game game state used to query the diplomatic state and settings
  * @return loyalty value in the range [0, 100]
  */
 public static int calculateCityLoyalty(int tax_rate, EfsIni efs_ini, Game game) {
   int excom_penalty = 0;
   if (game.getDiplomacy().getDiplomaticState(game.getTurn(), C.THE_CHURCH) == C.DS_WAR) {
     // NOTE(review): reads the penalty from game.getEfs_ini() although an EfsIni is already
     // passed as a parameter — presumably the same object; confirm and unify the access.
     excom_penalty = game.getEfs_ini().excom_peasant_loyalty_hit;
   }
   final int loyalty =
       100 - (tax_rate - efs_ini.default_tax_rate) * C.TAX_LOYALTY_HIT - excom_penalty;
   return FastMath.max(0, FastMath.min(100, loyalty));
 }
Esempio n. 8
0
 @Test
 public void testNextLongWideRange() {
   // a range spanning most of the long domain, crossing zero
   final long lower = -0x6543210FEDCBA987L;
   final long upper = 0x456789ABCDEF0123L;
   long largest = Long.MIN_VALUE;
   long smallest = Long.MAX_VALUE;
   for (int i = 0; i < 10000000; ++i) {
     final long value = randomData.nextLong(lower, upper);
     Assert.assertTrue(value >= lower);
     Assert.assertTrue(value <= upper);
     largest = FastMath.max(largest, value);
     smallest = FastMath.min(smallest, value);
   }
   // the observed values should cover almost the entire requested range
   final double coverage =
       (((double) largest) - ((double) smallest)) / (((double) upper) - ((double) lower));
   Assert.assertTrue(coverage > 0.99999);
 }
Esempio n. 9
0
 @Test
 public void testNextIntWideRange() {
   // a range spanning most of the int domain, crossing zero
   final int lower = -0x6543210F;
   final int upper = 0x456789AB;
   int largest = Integer.MIN_VALUE;
   int smallest = Integer.MAX_VALUE;
   for (int i = 0; i < 1000000; ++i) {
     final int value = randomData.nextInt(lower, upper);
     Assert.assertTrue(value >= lower);
     Assert.assertTrue(value <= upper);
     largest = FastMath.max(largest, value);
     smallest = FastMath.min(smallest, value);
   }
   // the observed values should cover almost the entire requested range
   final double coverage =
       (((double) largest) - ((double) smallest)) / (((double) upper) - ((double) lower));
   Assert.assertTrue(coverage > 0.99999);
 }
 /* 136:    */
 /* 137:    */ private double findUpperBound(UnivariateFunction f, double a, double h)
       /* 138:    */ {
   /* 139:249 */ double yA = f.value(a);
   /* 140:250 */ double yB = yA;
   /* 141:251 */ for (double step = h;
       step < 1.7976931348623157E+308D;
       step *= FastMath.max(2.0D, yA / yB))
   /* 142:    */ {
     /* 143:252 */ double b = a + step;
     /* 144:253 */ yB = f.value(b);
     /* 145:254 */ if (yA * yB <= 0.0D) {
       /* 146:255 */ return b;
       /* 147:    */ }
     /* 148:    */ }
   /* 149:258 */ throw new MathIllegalStateException(
       LocalizedFormats.UNABLE_TO_BRACKET_OPTIMUM_IN_LINE_SEARCH, new Object[0]);
   /* 150:    */ }
  /**
   * Create a triangular real distribution using the given lower limit, upper limit, and mode.
   *
   * <p>Note the constructor's parameter order: {@code (a, c, b)} — lower limit, mode, upper
   * limit — which differs from the alphabetical order in which the parameters are documented
   * below. Callers must pass the mode as the middle argument.
   *
   * @param a Lower limit of this distribution (inclusive).
   * @param b Upper limit of this distribution (inclusive).
   * @param c Mode of this distribution.
   * @throws NumberIsTooLargeException if {@code a >= b} or if {@code c > b}
   * @throws NumberIsTooSmallException if {@code c < a}
   */
  public TriangularDistribution(double a, double c, double b)
      throws NumberIsTooLargeException, NumberIsTooSmallException {
    // Validation order is observable behavior: a >= b is reported before the mode checks.
    if (a >= b) {
      throw new NumberIsTooLargeException(
          LocalizedFormats.LOWER_BOUND_NOT_BELOW_UPPER_BOUND, a, b, false);
    }
    if (c < a) {
      throw new NumberIsTooSmallException(LocalizedFormats.NUMBER_TOO_SMALL, c, a, true);
    }
    if (c > b) {
      throw new NumberIsTooLargeException(LocalizedFormats.NUMBER_TOO_LARGE, c, b, true);
    }

    this.a = a;
    this.c = c;
    this.b = b;
    // inverse-CDF solver accuracy scaled to the magnitude of the distribution bounds
    solverAbsoluteAccuracy = FastMath.max(FastMath.ulp(a), FastMath.ulp(b));
  }
Esempio n. 12
0
  /**
   * Returns the value of Δ(p) + Δ(q) - Δ(p + q), with p, q ≥ 10. Based on the <em>NSWC Library of
   * Mathematics Subroutines</em> double precision implementation, {@code DBCORR}. In {@code
   * BetaTest.testSumDeltaMinusDeltaSum()}, this private method is accessed through reflection.
   *
   * @param p First argument.
   * @param q Second argument.
   * @return the value of {@code Delta(p) + Delta(q) - Delta(p + q)}.
   * @throws NumberIsTooSmallException if {@code p < 10.0} or {@code q < 10.0}.
   */
  private static double sumDeltaMinusDeltaSum(final double p, final double q) {

    if (p < 10.0) {
      throw new NumberIsTooSmallException(p, 10.0, true);
    }
    if (q < 10.0) {
      throw new NumberIsTooSmallException(q, 10.0, true);
    }

    final double smaller = FastMath.min(p, q);
    final double larger = FastMath.max(p, q);

    // evaluate the DELTA polynomial at t = (10 / smaller)^2 using Horner's scheme
    final double sqrtT = 10.0 / smaller;
    final double t = sqrtT * sqrtT;
    double horner = DELTA[DELTA.length - 1];
    for (int i = DELTA.length - 2; i >= 0; i--) {
      horner = t * horner + DELTA[i];
    }
    return horner / smaller + deltaMinusDeltaSum(smaller, larger);
  }
Esempio n. 13
0
  /**
   * Set the voxel type at the given coordinates.
   *
   * <p>Descends the octree from the root, subdividing nodes along the path as needed, sets
   * the leaf to the new type, then merges ancestor nodes bottom-up wherever all of a
   * parent's children ended up with the same type.
   *
   * @param type The new voxel type to be set
   * @param x voxel x coordinate
   * @param y voxel y coordinate
   * @param z voxel z coordinate
   */
  public synchronized void set(int type, int x, int y, int z) {
    Node node = root;
    int parentLvl = depth - 1;
    int level = parentLvl;
    for (int i = depth - 1; i >= 0; --i) {
      level = i;
      parents[i] = node;

      if (node.type == type) {
        // an ancestor node already carries this type uniformly: nothing to change
        return;
      } else if (node.children == null) {
        // uniform node on the path: split it so a single child can be modified
        node.subdivide();
        parentLvl = i;
      }

      // select the child octant from bit i of each coordinate
      int xbit = 1 & (x >> i);
      int ybit = 1 & (y >> i);
      int zbit = 1 & (z >> i);
      node = node.children[(xbit << 2) | (ybit << 1) | zbit];
    }
    node.type = type;

    // merge nodes where all children have been set to the same type
    // (walk back up from the leaf's parent to the deepest node that was subdivided)
    for (int i = level; i <= parentLvl; ++i) {
      Node parent = parents[i];

      boolean allSame = true;
      for (Node child : parent.children) {
        if (child.type != node.type) {
          allSame = false;
          break;
        }
      }

      if (allSame) {
        parent.merge(node.type);
        // NOTE(review): presumably cacheLevel invalidates cached lookups up to this
        // level — confirm against the cache users
        cacheLevel = FastMath.max(i, cacheLevel);
      } else {
        break;
      }
    }
  }
Esempio n. 14
0
  /**
   * Get the resonant and non-resonant tesseral terms in the central body spherical harmonic field.
   *
   * @param resonantOnly extract only resonant terms
   */
  private void getResonantAndNonResonantTerms(final boolean resonantOnly) {

    // A tesseral term is considered resonant when its natural period, expressed in
    // satellite revolutions, stays within this tolerance
    final double tolerance =
        1. / FastMath.max(MIN_PERIOD_IN_SAT_REV, MIN_PERIOD_IN_SECONDS / orbitPeriod);

    // rebuild both collections from scratch
    resOrders.clear();
    nonResOrders.clear();

    for (int m = 1; m <= maxOrder; m++) {
      final double resonance = ratio * m;
      final int jComputedRes = (int) FastMath.round(resonance);
      int jRes = 0;
      final boolean resonant =
          jComputedRes > 0
              && jComputedRes <= jMax
              && FastMath.abs(resonance - jComputedRes) <= tolerance;
      if (resonant) {
        // Store each resonant index and order
        resOrders.add(m);
        jRes = jComputedRes;
      }

      if (!resonantOnly && !mDailiesOnly && m <= maxOrderTesseralSP) {
        // collect the non-resonant indices j for this order m; only pairs (j, m) with
        // |j| <= maxDegree + maxEccPow are kept (from |s-j| <= maxEccPow and |s| <= maxDegree)
        final List<Integer> listJofM = new ArrayList<Integer>();
        for (int j = -jMax; j <= jMax; j++) {
          if (j != 0 && j != jRes) {
            listJofM.add(j);
          }
        }
        nonResOrders.put(m, listJofM);
      }
    }
  }
Esempio n. 15
0
  /**
   * Compute the n-SUM for potential derivatives components.
   *
   * @param date current date
   * @param j resonant index <i>j</i>
   * @param m resonant order <i>m</i>
   * @param s d'Alembert characteristic <i>s</i>
   * @param maxN maximum possible value for <i>n</i> index
   * @param roaPow powers of R/a up to degree <i>n</i>
   * @param ghMSJ G<sup>j</sup><sub>m,s</sub> and H<sup>j</sup><sub>m,s</sub> polynomials
   * @param gammaMNS &Gamma;<sup>m</sup><sub>n,s</sub>(γ) function
   * @return Components of U<sub>n</sub> derivatives for fixed j, m, s, as a 7x2 array of
   *     {cos, sin} pairs ordered {da, dh, dk, dl, dalpha, dbeta, dgamma}
   * @throws OrekitException if some error occurred
   */
  private double[][] computeNSum(
      final AbsoluteDate date,
      final int j,
      final int m,
      final int s,
      final int maxN,
      final double[] roaPow,
      final GHmsjPolynomials ghMSJ,
      final GammaMnsFunction gammaMNS)
      throws OrekitException {

    // spherical harmonics
    final UnnormalizedSphericalHarmonics harmonics = provider.onDate(date);

    // Potential derivatives components (cos and sin parts accumulated separately)
    double dUdaCos = 0.;
    double dUdaSin = 0.;
    double dUdhCos = 0.;
    double dUdhSin = 0.;
    double dUdkCos = 0.;
    double dUdkSin = 0.;
    double dUdlCos = 0.;
    double dUdlSin = 0.;
    double dUdAlCos = 0.;
    double dUdAlSin = 0.;
    double dUdBeCos = 0.;
    double dUdBeSin = 0.;
    double dUdGaCos = 0.;
    double dUdGaSin = 0.;

    // I^m (I is the retrograde factor; the sign alternates with m when I < 0)
    final int Im = I > 0 ? 1 : (m % 2 == 0 ? 1 : -1);

    // jacobi v, w, indices from 2.7.1-(15)
    final int v = FastMath.abs(m - s);
    final int w = FastMath.abs(m + s);

    // Initialise lower degree nmin = (Max(2, m, |s|)) for summation over n
    final int nmin = FastMath.max(FastMath.max(2, m), FastMath.abs(s));

    // Get the corresponding Hansen object
    // (for negative j the sign of s is flipped, matching the (j, s) symmetry used
    // when the objects were created — see createHansenObjects)
    final int sIndex = maxDegree + (j < 0 ? -s : s);
    final int jIndex = FastMath.abs(j);
    final HansenTesseralLinear hans = this.hansenObjects[sIndex][jIndex];

    // n-SUM from nmin to N
    for (int n = nmin; n <= maxN; n++) {
      // If (n - s) is odd, the contribution is null because of Vmns
      if ((n - s) % 2 == 0) {

        // Vmns coefficient
        final double fns = fact[n + FastMath.abs(s)];
        final double vMNS = CoefficientsFactory.getVmns(m, n, s, fns, fact[n - m]);

        // Inclination function Gamma and derivative
        final double gaMNS = gammaMNS.getValue(m, n, s);
        final double dGaMNS = gammaMNS.getDerivative(m, n, s);

        // Hansen kernel value and derivative
        final double kJNS = hans.getValue(-n - 1, chi);
        final double dkJNS = hans.getDerivative(-n - 1, chi);

        // Gjms, Hjms polynomials and derivatives
        final double gMSJ = ghMSJ.getGmsj(m, s, j);
        final double hMSJ = ghMSJ.getHmsj(m, s, j);
        final double dGdh = ghMSJ.getdGmsdh(m, s, j);
        final double dGdk = ghMSJ.getdGmsdk(m, s, j);
        final double dGdA = ghMSJ.getdGmsdAlpha(m, s, j);
        final double dGdB = ghMSJ.getdGmsdBeta(m, s, j);
        final double dHdh = ghMSJ.getdHmsdh(m, s, j);
        final double dHdk = ghMSJ.getdHmsdk(m, s, j);
        final double dHdA = ghMSJ.getdHmsdAlpha(m, s, j);
        final double dHdB = ghMSJ.getdHmsdBeta(m, s, j);

        // Jacobi l-index from 2.7.1-(15)
        final int l = FastMath.min(n - m, n - FastMath.abs(s));
        // Jacobi polynomial and derivative (first order derivative w.r.t. gamma)
        final DerivativeStructure jacobi =
            JacobiPolynomials.getValue(l, v, w, new DerivativeStructure(1, 1, 0, gamma));

        // Geopotential coefficients
        final double cnm = harmonics.getUnnormalizedCnm(n, m);
        final double snm = harmonics.getUnnormalizedSnm(n, m);

        // Common factors from expansion of equations 3.3-4
        final double cf_0 = roaPow[n] * Im * vMNS;
        final double cf_1 = cf_0 * gaMNS * jacobi.getValue();
        final double cf_2 = cf_1 * kJNS;
        final double gcPhs = gMSJ * cnm + hMSJ * snm;
        final double gsMhc = gMSJ * snm - hMSJ * cnm;
        final double dKgcPhsx2 = 2. * dkJNS * gcPhs;
        final double dKgsMhcx2 = 2. * dkJNS * gsMhc;
        final double dUdaCoef = (n + 1) * cf_2;
        final double dUdlCoef = j * cf_2;
        final double dUdGaCoef =
            cf_0 * kJNS * (jacobi.getValue() * dGaMNS + gaMNS * jacobi.getPartialDerivative(1));

        // dU / da components
        dUdaCos += dUdaCoef * gcPhs;
        dUdaSin += dUdaCoef * gsMhc;

        // dU / dh components
        dUdhCos += cf_1 * (kJNS * (cnm * dGdh + snm * dHdh) + h * dKgcPhsx2);
        dUdhSin += cf_1 * (kJNS * (snm * dGdh - cnm * dHdh) + h * dKgsMhcx2);

        // dU / dk components
        dUdkCos += cf_1 * (kJNS * (cnm * dGdk + snm * dHdk) + k * dKgcPhsx2);
        dUdkSin += cf_1 * (kJNS * (snm * dGdk - cnm * dHdk) + k * dKgsMhcx2);

        // dU / dLambda components
        dUdlCos += dUdlCoef * gsMhc;
        dUdlSin += -dUdlCoef * gcPhs;

        // dU / alpha components
        dUdAlCos += cf_2 * (dGdA * cnm + dHdA * snm);
        dUdAlSin += cf_2 * (dGdA * snm - dHdA * cnm);

        // dU / dBeta components
        dUdBeCos += cf_2 * (dGdB * cnm + dHdB * snm);
        dUdBeSin += cf_2 * (dGdB * snm - dHdB * cnm);

        // dU / dGamma components
        dUdGaCos += dUdGaCoef * gcPhs;
        dUdGaSin += dUdGaCoef * gsMhc;
      }
    }

    return new double[][] {
      {dUdaCos, dUdaSin},
      {dUdhCos, dUdhSin},
      {dUdkCos, dUdkSin},
      {dUdlCos, dUdlSin},
      {dUdAlCos, dUdAlSin},
      {dUdBeCos, dUdBeSin},
      {dUdGaCos, dUdGaSin}
    };
  }
Esempio n. 16
0
  /**
   * Computes the potential U derivatives.
   *
   * <p>The following elements are computed from expression 3.3 - (4).
   *
   * <pre>
   *  dU / da
   *  dU / dh
   *  dU / dk
   *  dU / dλ
   *  dU / dα
   *  dU / dβ
   *  dU / dγ
   *  </pre>
   *
   * @param date current date
   * @return potential derivatives, ordered {da, dh, dk, dl, dalpha, dbeta, dgamma}
   * @throws OrekitException if an error occurs
   */
  private double[] computeUDerivatives(final AbsoluteDate date) throws OrekitException {

    // Potential derivatives
    double dUda = 0.;
    double dUdh = 0.;
    double dUdk = 0.;
    double dUdl = 0.;
    double dUdAl = 0.;
    double dUdBe = 0.;
    double dUdGa = 0.;

    // Compute only if there is at least one resonant tesseral
    if (!resOrders.isEmpty()) {
      // Gmsj and Hmsj polynomials
      final GHmsjPolynomials ghMSJ = new GHmsjPolynomials(k, h, alpha, beta, I);

      // GAMMAmns function
      final GammaMnsFunction gammaMNS = new GammaMnsFunction(fact, gamma, I);

      // R / a up to power degree (roaPow[0] = 1 is set here explicitly)
      final double[] roaPow = new double[maxDegree + 1];
      roaPow[0] = 1.;
      for (int i = 1; i <= maxDegree; i++) {
        roaPow[i] = roa * roaPow[i - 1];
      }

      // SUM over resonant terms {j,m}
      for (int m : resOrders) {

        // Resonant index for the current resonant order
        final int j = FastMath.max(1, (int) FastMath.round(ratio * m));

        // Phase angle
        final double jlMmt = j * lm - m * theta;
        final double sinPhi = FastMath.sin(jlMmt);
        final double cosPhi = FastMath.cos(jlMmt);

        // Potential derivatives components for a given resonant pair {j,m}
        double dUdaCos = 0.;
        double dUdaSin = 0.;
        double dUdhCos = 0.;
        double dUdhSin = 0.;
        double dUdkCos = 0.;
        double dUdkSin = 0.;
        double dUdlCos = 0.;
        double dUdlSin = 0.;
        double dUdAlCos = 0.;
        double dUdAlSin = 0.;
        double dUdBeCos = 0.;
        double dUdBeSin = 0.;
        double dUdGaCos = 0.;
        double dUdGaSin = 0.;

        // s-SUM from -sMin to sMax
        final int sMin = FastMath.min(maxEccPow - j, maxDegree);
        final int sMax = FastMath.min(maxEccPow + j, maxDegree);
        for (int s = 0; s <= sMax; s++) {

          // Compute the initial values for Hansen coefficients using newComb operators
          this.hansenObjects[s + maxDegree][j].computeInitValues(e2, chi, chi2);

          // n-SUM for s positive
          final double[][] nSumSpos =
              computeNSum(date, j, m, s, maxDegree, roaPow, ghMSJ, gammaMNS);
          dUdaCos += nSumSpos[0][0];
          dUdaSin += nSumSpos[0][1];
          dUdhCos += nSumSpos[1][0];
          dUdhSin += nSumSpos[1][1];
          dUdkCos += nSumSpos[2][0];
          dUdkSin += nSumSpos[2][1];
          dUdlCos += nSumSpos[3][0];
          dUdlSin += nSumSpos[3][1];
          dUdAlCos += nSumSpos[4][0];
          dUdAlSin += nSumSpos[4][1];
          dUdBeCos += nSumSpos[5][0];
          dUdBeSin += nSumSpos[5][1];
          dUdGaCos += nSumSpos[6][0];
          dUdGaSin += nSumSpos[6][1];

          // n-SUM for s negative (skipped for s = 0 and for s beyond sMin)
          if (s > 0 && s <= sMin) {
            // Compute the initial values for Hansen coefficients using newComb operators
            this.hansenObjects[maxDegree - s][j].computeInitValues(e2, chi, chi2);

            final double[][] nSumSneg =
                computeNSum(date, j, m, -s, maxDegree, roaPow, ghMSJ, gammaMNS);
            dUdaCos += nSumSneg[0][0];
            dUdaSin += nSumSneg[0][1];
            dUdhCos += nSumSneg[1][0];
            dUdhSin += nSumSneg[1][1];
            dUdkCos += nSumSneg[2][0];
            dUdkSin += nSumSneg[2][1];
            dUdlCos += nSumSneg[3][0];
            dUdlSin += nSumSneg[3][1];
            dUdAlCos += nSumSneg[4][0];
            dUdAlSin += nSumSneg[4][1];
            dUdBeCos += nSumSneg[5][0];
            dUdBeSin += nSumSneg[5][1];
            dUdGaCos += nSumSneg[6][0];
            dUdGaSin += nSumSneg[6][1];
          }
        }

        // Assembly of potential derivatives components
        dUda += cosPhi * dUdaCos + sinPhi * dUdaSin;
        dUdh += cosPhi * dUdhCos + sinPhi * dUdhSin;
        dUdk += cosPhi * dUdkCos + sinPhi * dUdkSin;
        dUdl += cosPhi * dUdlCos + sinPhi * dUdlSin;
        dUdAl += cosPhi * dUdAlCos + sinPhi * dUdAlSin;
        dUdBe += cosPhi * dUdBeCos + sinPhi * dUdBeSin;
        dUdGa += cosPhi * dUdGaCos + sinPhi * dUdGaSin;
      }

      // apply the common scale factors (moa presumably = mu / a — TODO confirm)
      dUda *= -moa / a;
      dUdh *= moa;
      dUdk *= moa;
      dUdl *= moa;
      dUdAl *= moa;
      dUdBe *= moa;
      dUdGa *= moa;
    }

    return new double[] {dUda, dUdh, dUdk, dUdl, dUdAl, dUdBe, dUdGa};
  }
Esempio n. 17
0
 /**
  * {@inheritDoc}
  *
  * <p>Returns the larger of the two arguments, delegating NaN and signed-zero handling to
  * {@link FastMath#max(double, double)}.
  */
 public double value(double x, double y) {
   return FastMath.max(x, y);
 }
  /** {@inheritDoc} */
  @Override
  protected double doSolve() {

    // prepare arrays with the first points
    final double[] x = new double[maximalOrder + 1];
    final double[] y = new double[maximalOrder + 1];
    x[0] = getMin();
    x[1] = getStartValue();
    x[2] = getMax();
    verifySequence(x[0], x[1], x[2]);

    // evaluate initial guess
    y[1] = computeObjectiveValue(x[1]);
    if (Precision.equals(y[1], 0.0, 1)) {
      // return the initial guess if it is a perfect root.
      return x[1];
    }

    // evaluate first  endpoint
    y[0] = computeObjectiveValue(x[0]);
    if (Precision.equals(y[0], 0.0, 1)) {
      // return the first endpoint if it is a perfect root.
      return x[0];
    }

    int nbPoints;
    int signChangeIndex;
    if (y[0] * y[1] < 0) {

      // reduce interval if it brackets the root
      nbPoints = 2;
      signChangeIndex = 1;

    } else {

      // evaluate second endpoint
      y[2] = computeObjectiveValue(x[2]);
      if (Precision.equals(y[2], 0.0, 1)) {
        // return the second endpoint if it is a perfect root.
        return x[2];
      }

      if (y[1] * y[2] < 0) {
        // use all computed point as a start sampling array for solving
        nbPoints = 3;
        signChangeIndex = 2;
      } else {
        throw new NoBracketingException(x[0], x[2], y[0], y[2]);
      }
    }

    // prepare a work array for inverse polynomial interpolation
    final double[] tmpX = new double[x.length];

    // current tightest bracketing of the root
    double xA = x[signChangeIndex - 1];
    double yA = y[signChangeIndex - 1];
    double absYA = FastMath.abs(yA);
    int agingA = 0;
    double xB = x[signChangeIndex];
    double yB = y[signChangeIndex];
    double absYB = FastMath.abs(yB);
    int agingB = 0;

    // search loop
    while (true) {

      // check convergence of bracketing interval
      final double xTol =
          getAbsoluteAccuracy()
              + getRelativeAccuracy() * FastMath.max(FastMath.abs(xA), FastMath.abs(xB));
      if (((xB - xA) <= xTol) || (FastMath.max(absYA, absYB) < getFunctionValueAccuracy())) {
        switch (allowed) {
          case ANY_SIDE:
            return absYA < absYB ? xA : xB;
          case LEFT_SIDE:
            return xA;
          case RIGHT_SIDE:
            return xB;
          case BELOW_SIDE:
            return (yA <= 0) ? xA : xB;
          case ABOVE_SIDE:
            return (yA < 0) ? xB : xA;
          default:
            // this should never happen
            throw new MathInternalError(null);
        }
      }

      // target for the next evaluation point
      double targetY;
      if (agingA >= MAXIMAL_AGING) {
        // we keep updating the high bracket, try to compensate this
        final int p = agingA - MAXIMAL_AGING;
        final double weightA = (1 << p) - 1;
        final double weightB = p + 1;
        targetY = (weightA * yA - weightB * REDUCTION_FACTOR * yB) / (weightA + weightB);
      } else if (agingB >= MAXIMAL_AGING) {
        // we keep updating the low bracket, try to compensate this
        final int p = agingB - MAXIMAL_AGING;
        final double weightA = p + 1;
        final double weightB = (1 << p) - 1;
        targetY = (weightB * yB - weightA * REDUCTION_FACTOR * yA) / (weightA + weightB);
      } else {
        // bracketing is balanced, try to find the root itself
        targetY = 0;
      }

      // make a few attempts to guess a root,
      double nextX;
      int start = 0;
      int end = nbPoints;
      do {

        // guess a value for current target, using inverse polynomial interpolation
        System.arraycopy(x, start, tmpX, start, end - start);
        nextX = guessX(targetY, tmpX, y, start, end);

        if (!((nextX > xA) && (nextX < xB))) {
          // the guessed root is not strictly inside of the tightest bracketing interval

          // the guessed root is either not strictly inside the interval or it
          // is a NaN (which occurs when some sampling points share the same y)
          // we try again with a lower interpolation order
          if (signChangeIndex - start >= end - signChangeIndex) {
            // we have more points before the sign change, drop the lowest point
            ++start;
          } else {
            // we have more points after sign change, drop the highest point
            --end;
          }

          // we need to do one more attempt
          nextX = Double.NaN;
        }

      } while (Double.isNaN(nextX) && (end - start > 1));

      if (Double.isNaN(nextX)) {
        // fall back to bisection
        nextX = xA + 0.5 * (xB - xA);
        start = signChangeIndex - 1;
        end = signChangeIndex;
      }

      // evaluate the function at the guessed root
      final double nextY = computeObjectiveValue(nextX);
      if (Precision.equals(nextY, 0.0, 1)) {
        // we have found an exact root, since it is not an approximation
        // we don't need to bother about the allowed solutions setting
        return nextX;
      }

      if ((nbPoints > 2) && (end - start != nbPoints)) {

        // we have been forced to ignore some points to keep bracketing,
        // they are probably too far from the root, drop them from now on
        nbPoints = end - start;
        System.arraycopy(x, start, x, 0, nbPoints);
        System.arraycopy(y, start, y, 0, nbPoints);
        signChangeIndex -= start;

      } else if (nbPoints == x.length) {

        // we have to drop one point in order to insert the new one
        nbPoints--;

        // keep the tightest bracketing interval as centered as possible
        if (signChangeIndex >= (x.length + 1) / 2) {
          // we drop the lowest point, we have to shift the arrays and the index
          System.arraycopy(x, 1, x, 0, nbPoints);
          System.arraycopy(y, 1, y, 0, nbPoints);
          --signChangeIndex;
        }
      }

      // insert the last computed point
      // (by construction, we know it lies inside the tightest bracketing interval)
      System.arraycopy(x, signChangeIndex, x, signChangeIndex + 1, nbPoints - signChangeIndex);
      x[signChangeIndex] = nextX;
      System.arraycopy(y, signChangeIndex, y, signChangeIndex + 1, nbPoints - signChangeIndex);
      y[signChangeIndex] = nextY;
      ++nbPoints;

      // update the bracketing interval
      if (nextY * yA <= 0) {
        // the sign change occurs before the inserted point
        xB = nextX;
        yB = nextY;
        absYB = FastMath.abs(yB);
        ++agingA;
        agingB = 0;
      } else {
        // the sign change occurs after the inserted point
        xA = nextX;
        yA = nextY;
        absYA = FastMath.abs(yA);
        agingA = 0;
        ++agingB;

        // update the sign change index
        signChangeIndex++;
      }
    }
  }
  /**
   * Calculates the compact Singular Value Decomposition of the given matrix.
   *
   * <p>The matrix is first reduced to bidiagonal form with Householder transformations
   * (diagonal stored in {@code singularValues}, super-diagonal in {@code e}), then an
   * implicit-shift QR iteration drives the super-diagonal to zero until the singular
   * values converge. This follows the classical JAMA/LINPACK formulation.
   *
   * <p>If the input has more columns than rows it is transposed up front so that
   * internally {@code m >= n} always holds; the roles of U and V are swapped back
   * at the end.
   *
   * @param matrix Matrix to decompose.
   */
  public SingularValueDecomposition(final RealMatrix matrix) {
    final double[][] A;

    // "m" is always the largest dimension.
    if (matrix.getRowDimension() < matrix.getColumnDimension()) {
      transposed = true;
      A = matrix.transpose().getData();
      m = matrix.getColumnDimension();
      n = matrix.getRowDimension();
    } else {
      transposed = false;
      A = matrix.getData();
      m = matrix.getRowDimension();
      n = matrix.getColumnDimension();
    }

    // Working storage: U holds left vectors, V right vectors, e the super-diagonal
    // of the bidiagonal form, work a per-row scratch buffer.
    singularValues = new double[n];
    final double[][] U = new double[m][n];
    final double[][] V = new double[n][n];
    final double[] e = new double[n];
    final double[] work = new double[m];
    // Reduce A to bidiagonal form, storing the diagonal elements
    // in s and the super-diagonal elements in e.
    // nct: number of column (Householder) transformations,
    // nrt: number of row transformations.
    final int nct = FastMath.min(m - 1, n);
    final int nrt = FastMath.max(0, n - 2);
    for (int k = 0; k < FastMath.max(nct, nrt); k++) {
      if (k < nct) {
        // Compute the transformation for the k-th column and
        // place the k-th diagonal in s[k].
        // Compute 2-norm of k-th column without under/overflow.
        singularValues[k] = 0;
        for (int i = k; i < m; i++) {
          singularValues[k] = FastMath.hypot(singularValues[k], A[i][k]);
        }
        if (singularValues[k] != 0) {
          if (A[k][k] < 0) {
            singularValues[k] = -singularValues[k];
          }
          for (int i = k; i < m; i++) {
            A[i][k] /= singularValues[k];
          }
          A[k][k] += 1;
        }
        singularValues[k] = -singularValues[k];
      }
      for (int j = k + 1; j < n; j++) {
        if (k < nct && singularValues[k] != 0) {
          // Apply the transformation.
          double t = 0;
          for (int i = k; i < m; i++) {
            t += A[i][k] * A[i][j];
          }
          t = -t / A[k][k];
          for (int i = k; i < m; i++) {
            A[i][j] += t * A[i][k];
          }
        }
        // Place the k-th row of A into e for the
        // subsequent calculation of the row transformation.
        e[j] = A[k][j];
      }
      if (k < nct) {
        // Place the transformation in U for subsequent back
        // multiplication.
        for (int i = k; i < m; i++) {
          U[i][k] = A[i][k];
        }
      }
      if (k < nrt) {
        // Compute the k-th row transformation and place the
        // k-th super-diagonal in e[k].
        // Compute 2-norm without under/overflow.
        e[k] = 0;
        for (int i = k + 1; i < n; i++) {
          e[k] = FastMath.hypot(e[k], e[i]);
        }
        if (e[k] != 0) {
          if (e[k + 1] < 0) {
            e[k] = -e[k];
          }
          for (int i = k + 1; i < n; i++) {
            e[i] /= e[k];
          }
          e[k + 1] += 1;
        }
        e[k] = -e[k];
        if (k + 1 < m && e[k] != 0) {
          // Apply the transformation.
          for (int i = k + 1; i < m; i++) {
            work[i] = 0;
          }
          for (int j = k + 1; j < n; j++) {
            for (int i = k + 1; i < m; i++) {
              work[i] += e[j] * A[i][j];
            }
          }
          for (int j = k + 1; j < n; j++) {
            final double t = -e[j] / e[k + 1];
            for (int i = k + 1; i < m; i++) {
              A[i][j] += t * work[i];
            }
          }
        }

        // Place the transformation in V for subsequent
        // back multiplication.
        for (int i = k + 1; i < n; i++) {
          V[i][k] = e[i];
        }
      }
    }
    // Set up the final bidiagonal matrix or order p.
    int p = n;
    if (nct < n) {
      singularValues[nct] = A[nct][nct];
    }
    if (m < p) {
      singularValues[p - 1] = 0;
    }
    if (nrt + 1 < p) {
      e[nrt] = A[nrt][p - 1];
    }
    e[p - 1] = 0;

    // Generate U.
    for (int j = nct; j < n; j++) {
      for (int i = 0; i < m; i++) {
        U[i][j] = 0;
      }
      U[j][j] = 1;
    }
    // Accumulate the stored Householder column transformations, in reverse order.
    for (int k = nct - 1; k >= 0; k--) {
      if (singularValues[k] != 0) {
        for (int j = k + 1; j < n; j++) {
          double t = 0;
          for (int i = k; i < m; i++) {
            t += U[i][k] * U[i][j];
          }
          t = -t / U[k][k];
          for (int i = k; i < m; i++) {
            U[i][j] += t * U[i][k];
          }
        }
        for (int i = k; i < m; i++) {
          U[i][k] = -U[i][k];
        }
        U[k][k] = 1 + U[k][k];
        for (int i = 0; i < k - 1; i++) {
          U[i][k] = 0;
        }
      } else {
        for (int i = 0; i < m; i++) {
          U[i][k] = 0;
        }
        U[k][k] = 1;
      }
    }

    // Generate V.
    // Accumulate the stored row transformations, in reverse order.
    for (int k = n - 1; k >= 0; k--) {
      if (k < nrt && e[k] != 0) {
        for (int j = k + 1; j < n; j++) {
          double t = 0;
          for (int i = k + 1; i < n; i++) {
            t += V[i][k] * V[i][j];
          }
          t = -t / V[k + 1][k];
          for (int i = k + 1; i < n; i++) {
            V[i][j] += t * V[i][k];
          }
        }
      }
      for (int i = 0; i < n; i++) {
        V[i][k] = 0;
      }
      V[k][k] = 1;
    }

    // Main iteration loop for the singular values.
    final int pp = p - 1;
    int iter = 0;
    while (p > 0) {
      int k;
      int kase;
      // Here is where a test for too many iterations would go.
      // This section of the program inspects for
      // negligible elements in the s and e arrays.  On
      // completion the variables kase and k are set as follows.
      // kase = 1     if s(p) and e[k-1] are negligible and k<p
      // kase = 2     if s(k) is negligible and k<p
      // kase = 3     if e[k-1] is negligible, k<p, and
      //              s(k), ..., s(p) are not negligible (qr step).
      // kase = 4     if e(p-1) is negligible (convergence).
      for (k = p - 2; k >= 0; k--) {
        final double threshold =
            TINY + EPS * (FastMath.abs(singularValues[k]) + FastMath.abs(singularValues[k + 1]));

        // the following condition is written this way in order
        // to break out of the loop when NaN occurs, writing it
        // as "if (FastMath.abs(e[k]) <= threshold)" would loop
        // indefinitely in case of NaNs because comparison on NaNs
        // always return false, regardless of what is checked
        // see issue MATH-947
        if (!(FastMath.abs(e[k]) > threshold)) {
          e[k] = 0;
          break;
        }
      }

      if (k == p - 2) {
        kase = 4;
      } else {
        int ks;
        for (ks = p - 1; ks >= k; ks--) {
          if (ks == k) {
            break;
          }
          final double t =
              (ks != p ? FastMath.abs(e[ks]) : 0) + (ks != k + 1 ? FastMath.abs(e[ks - 1]) : 0);
          if (FastMath.abs(singularValues[ks]) <= TINY + EPS * t) {
            singularValues[ks] = 0;
            break;
          }
        }
        if (ks == k) {
          kase = 3;
        } else if (ks == p - 1) {
          kase = 1;
        } else {
          kase = 2;
          k = ks;
        }
      }
      k++;
      // Perform the task indicated by kase.
      switch (kase) {
          // Deflate negligible s(p).
        case 1:
          {
            double f = e[p - 2];
            e[p - 2] = 0;
            for (int j = p - 2; j >= k; j--) {
              double t = FastMath.hypot(singularValues[j], f);
              final double cs = singularValues[j] / t;
              final double sn = f / t;
              singularValues[j] = t;
              if (j != k) {
                f = -sn * e[j - 1];
                e[j - 1] = cs * e[j - 1];
              }

              // apply the same Givens rotation to the right singular vectors
              for (int i = 0; i < n; i++) {
                t = cs * V[i][j] + sn * V[i][p - 1];
                V[i][p - 1] = -sn * V[i][j] + cs * V[i][p - 1];
                V[i][j] = t;
              }
            }
          }
          break;
          // Split at negligible s(k).
        case 2:
          {
            double f = e[k - 1];
            e[k - 1] = 0;
            for (int j = k; j < p; j++) {
              double t = FastMath.hypot(singularValues[j], f);
              final double cs = singularValues[j] / t;
              final double sn = f / t;
              singularValues[j] = t;
              f = -sn * e[j];
              e[j] = cs * e[j];

              // apply the same Givens rotation to the left singular vectors
              for (int i = 0; i < m; i++) {
                t = cs * U[i][j] + sn * U[i][k - 1];
                U[i][k - 1] = -sn * U[i][j] + cs * U[i][k - 1];
                U[i][j] = t;
              }
            }
          }
          break;
          // Perform one qr step.
        case 3:
          {
            // Calculate the shift.
            // All quantities are divided by the largest magnitude involved
            // to avoid overflow/underflow in the intermediate products.
            final double maxPm1Pm2 =
                FastMath.max(
                    FastMath.abs(singularValues[p - 1]), FastMath.abs(singularValues[p - 2]));
            final double scale =
                FastMath.max(
                    FastMath.max(
                        FastMath.max(maxPm1Pm2, FastMath.abs(e[p - 2])),
                        FastMath.abs(singularValues[k])),
                    FastMath.abs(e[k]));
            final double sp = singularValues[p - 1] / scale;
            final double spm1 = singularValues[p - 2] / scale;
            final double epm1 = e[p - 2] / scale;
            final double sk = singularValues[k] / scale;
            final double ek = e[k] / scale;
            final double b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2.0;
            final double c = (sp * epm1) * (sp * epm1);
            double shift = 0;
            if (b != 0 || c != 0) {
              shift = FastMath.sqrt(b * b + c);
              if (b < 0) {
                shift = -shift;
              }
              shift = c / (b + shift);
            }
            double f = (sk + sp) * (sk - sp) + shift;
            double g = sk * ek;
            // Chase zeros.
            for (int j = k; j < p - 1; j++) {
              double t = FastMath.hypot(f, g);
              double cs = f / t;
              double sn = g / t;
              if (j != k) {
                e[j - 1] = t;
              }
              f = cs * singularValues[j] + sn * e[j];
              e[j] = cs * e[j] - sn * singularValues[j];
              g = sn * singularValues[j + 1];
              singularValues[j + 1] = cs * singularValues[j + 1];

              for (int i = 0; i < n; i++) {
                t = cs * V[i][j] + sn * V[i][j + 1];
                V[i][j + 1] = -sn * V[i][j] + cs * V[i][j + 1];
                V[i][j] = t;
              }
              t = FastMath.hypot(f, g);
              cs = f / t;
              sn = g / t;
              singularValues[j] = t;
              f = cs * e[j] + sn * singularValues[j + 1];
              singularValues[j + 1] = -sn * e[j] + cs * singularValues[j + 1];
              g = sn * e[j + 1];
              e[j + 1] = cs * e[j + 1];
              if (j < m - 1) {
                for (int i = 0; i < m; i++) {
                  t = cs * U[i][j] + sn * U[i][j + 1];
                  U[i][j + 1] = -sn * U[i][j] + cs * U[i][j + 1];
                  U[i][j] = t;
                }
              }
            }
            e[p - 2] = f;
            iter = iter + 1;
          }
          break;
          // Convergence.
        default:
          {
            // Make the singular values positive.
            if (singularValues[k] <= 0) {
              singularValues[k] = singularValues[k] < 0 ? -singularValues[k] : 0;

              // negating a singular value requires negating the matching column of V
              for (int i = 0; i <= pp; i++) {
                V[i][k] = -V[i][k];
              }
            }
            // Order the singular values.
            while (k < pp) {
              if (singularValues[k] >= singularValues[k + 1]) {
                break;
              }
              double t = singularValues[k];
              singularValues[k] = singularValues[k + 1];
              singularValues[k + 1] = t;
              if (k < n - 1) {
                for (int i = 0; i < n; i++) {
                  t = V[i][k + 1];
                  V[i][k + 1] = V[i][k];
                  V[i][k] = t;
                }
              }
              if (k < m - 1) {
                for (int i = 0; i < m; i++) {
                  t = U[i][k + 1];
                  U[i][k + 1] = U[i][k];
                  U[i][k] = t;
                }
              }
              k++;
            }
            iter = 0;
            p--;
          }
          break;
      }
    }

    // Set the small value tolerance used to calculate rank and pseudo-inverse
    tol = FastMath.max(m * singularValues[0] * EPS, FastMath.sqrt(Precision.SAFE_MIN));

    if (!transposed) {
      cachedU = MatrixUtils.createRealMatrix(U);
      cachedV = MatrixUtils.createRealMatrix(V);
    } else {
      // the input was transposed up front, so swap U and V back
      cachedU = MatrixUtils.createRealMatrix(V);
      cachedV = MatrixUtils.createRealMatrix(U);
    }
  }
Example 20
0
 /**
  * Return the lowest domain value for the given hypergeometric distribution parameters.
  *
  * @param n Population size.
  * @param m Number of successes in the population.
  * @param k Sample size.
  * @return the lowest domain value of the hypergeometric distribution.
  */
 private int getLowerDomain(int n, int m, int k) {
   // Once all n - k unsampled items are assumed to be failures, at least
   // m - (n - k) successes must appear in the sample; never below zero.
   final int minSuccesses = m - (n - k);
   return minSuccesses > 0 ? minSuccesses : 0;
 }
  /**
   * {@inheritDoc}
   *
   * <p>Integrates the equations with an embedded Runge-Kutta scheme and adaptive step size
   * control: each step is attempted, the local normalized error is estimated, and the step
   * is shrunk and retried while that estimate is {@code >= 1}. Accepted steps are handed to
   * the event and step-handler machinery through {@code acceptStep}.
   */
  @Override
  public void integrate(final ExpandableStatefulODE equations, final double t)
      throws MathIllegalStateException, MathIllegalArgumentException {

    sanityChecks(equations, t);
    setEquations(equations);
    final boolean forward = t > equations.getTime();

    // create some internal working arrays
    final double[] y0 = equations.getCompleteState();
    final double[] y = y0.clone();
    // c holds the time abscissae of the internal stages, hence one extra stage overall
    final int stages = c.length + 1;
    final double[][] yDotK = new double[stages][y.length];
    final double[] yTmp = y0.clone();
    final double[] yDotTmp = new double[y.length];

    // set up an interpolator sharing the integrator arrays
    final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
    interpolator.reinitialize(
        this, yTmp, yDotK, forward, equations.getPrimaryMapper(), equations.getSecondaryMappers());
    interpolator.storeTime(equations.getTime());

    // set up integration control objects
    stepStart = equations.getTime();
    double hNew = 0;
    boolean firstTime = true;
    initIntegration(equations.getTime(), y0, t);

    // main integration loop
    isLastStep = false;
    do {

      interpolator.shift();

      // iterate over step size, ensuring local normalized error is smaller than 1
      double error = 10;
      while (error >= 1.0) {

        if (firstTime || !fsal) {
          // first stage
          // (for FSAL schemes the first stage of later steps is reused from the
          // previous step's last evaluation, saved below)
          computeDerivatives(stepStart, y, yDotK[0]);
        }

        if (firstTime) {
          // build the error scale vector from the tolerance settings,
          // then use it to choose an initial step size
          final double[] scale = new double[mainSetDimension];
          if (vecAbsoluteTolerance == null) {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
            }
          } else {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
            }
          }
          hNew = initializeStep(forward, getOrder(), scale, stepStart, y, yDotK[0], yTmp, yDotK[1]);
          firstTime = false;
        }

        stepSize = hNew;
        // clip the step so it does not overshoot the target time t
        if (forward) {
          if (stepStart + stepSize >= t) {
            stepSize = t - stepStart;
          }
        } else {
          if (stepStart + stepSize <= t) {
            stepSize = t - stepStart;
          }
        }

        // next stages
        for (int k = 1; k < stages; ++k) {

          // stage state: y plus the a-weighted combination of earlier stage derivatives
          for (int j = 0; j < y0.length; ++j) {
            double sum = a[k - 1][0] * yDotK[0][j];
            for (int l = 1; l < k; ++l) {
              sum += a[k - 1][l] * yDotK[l][j];
            }
            yTmp[j] = y[j] + stepSize * sum;
          }

          computeDerivatives(stepStart + c[k - 1] * stepSize, yTmp, yDotK[k]);
        }

        // estimate the state at the end of the step
        for (int j = 0; j < y0.length; ++j) {
          double sum = b[0] * yDotK[0][j];
          for (int l = 1; l < stages; ++l) {
            sum += b[l] * yDotK[l][j];
          }
          yTmp[j] = y[j] + stepSize * sum;
        }

        // estimate the error at the end of the step
        error = estimateError(yDotK, y, yTmp, stepSize);
        if (error >= 1.0) {
          // reject the step and attempt to reduce error by stepsize control
          final double factor =
              FastMath.min(
                  maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
          hNew = filterStep(stepSize * factor, forward, false);
        }
      }

      // local error is small enough: accept the step, trigger events and step handlers
      interpolator.storeTime(stepStart + stepSize);
      System.arraycopy(yTmp, 0, y, 0, y0.length);
      System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
      stepStart = acceptStep(interpolator, y, yDotTmp, t);
      System.arraycopy(y, 0, yTmp, 0, y.length);

      if (!isLastStep) {

        // prepare next step
        interpolator.storeTime(stepStart);

        if (fsal) {
          // save the last evaluation for the next step
          System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
        }

        // stepsize control for next step
        final double factor =
            FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
        final double scaledH = stepSize * factor;
        final double nextT = stepStart + scaledH;
        final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
        hNew = filterStep(scaledH, forward, nextIsLast);

        // if the filtered step still reaches the target time,
        // truncate it exactly to the remaining interval
        final double filteredNextT = stepStart + hNew;
        final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
        if (filteredNextIsLast) {
          hNew = t - stepStart;
        }
      }

    } while (!isLastStep);

    // dispatch results
    equations.setTime(stepStart);
    equations.setCompleteState(y);

    resetInternalState();
  }
Example 22
0
 /**
  * {@inheritDoc}
  *
  * <p>For population size {@code N}, number of successes {@code m}, and sample size {@code n}, the
  * lower bound of the support is {@code max(0, n + m - N)}.
  *
  * @return lower bound of the support
  */
 public int getSupportLowerBound() {
   // the sample cannot contain fewer successes than what remains after
   // exhausting every failure in the population
   final int excess = getSampleSize() + getNumberOfSuccesses() - getPopulationSize();
   return excess > 0 ? excess : 0;
 }
  /**
   * Determines the Levenberg-Marquardt parameter.
   *
   * <p>This implementation is a translation in Java of the MINPACK <a
   * href="http://www.netlib.org/minpack/lmpar.f">lmpar</a> routine.
   *
   * <p>This method sets the lmPar and lmDir attributes.
   *
   * <p>The authors of the original fortran function are:
   *
   * <ul>
   *   <li>Argonne National Laboratory. MINPACK project. March 1980
   *   <li>Burton S. Garbow
   *   <li>Kenneth E. Hillstrom
   *   <li>Jorge J. More
   * </ul>
   *
   * <p>Luc Maisonobe did the Java translation.
   *
   * @param qy Array containing qTy.
   * @param delta Upper bound on the euclidean norm of diagR * lmDir.
   * @param diag Diagonal matrix.
   * @param internalData Data (modified in-place in this method).
   * @param solvedCols Number of solved point.
   * @param work1 work array
   * @param work2 work array
   * @param work3 work array
   * @param lmDir the "returned" LM direction will be stored in this array.
   * @param lmPar the value of the LM parameter from the previous iteration.
   * @return the new LM parameter
   */
  private double determineLMParameter(
      double[] qy,
      double delta,
      double[] diag,
      InternalData internalData,
      int solvedCols,
      double[] work1,
      double[] work2,
      double[] work3,
      double[] lmDir,
      double lmPar) {
    final double[][] weightedJacobian = internalData.weightedJacobian;
    final int[] permutation = internalData.permutation;
    final int rank = internalData.rank;
    final double[] diagR = internalData.diagR;

    final int nC = weightedJacobian[0].length;

    // compute and store in x the gauss-newton direction, if the
    // jacobian is rank-deficient, obtain a least squares solution
    for (int j = 0; j < rank; ++j) {
      lmDir[permutation[j]] = qy[j];
    }
    for (int j = rank; j < nC; ++j) {
      lmDir[permutation[j]] = 0;
    }
    // back-substitution through the (permuted) upper-triangular R factor
    for (int k = rank - 1; k >= 0; --k) {
      int pk = permutation[k];
      double ypk = lmDir[pk] / diagR[pk];
      for (int i = 0; i < k; ++i) {
        lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
      }
      lmDir[pk] = ypk;
    }

    // evaluate the function at the origin, and test
    // for acceptance of the Gauss-Newton direction
    double dxNorm = 0;
    for (int j = 0; j < solvedCols; ++j) {
      int pj = permutation[j];
      double s = diag[pj] * lmDir[pj];
      work1[pj] = s;
      dxNorm += s * s;
    }
    dxNorm = FastMath.sqrt(dxNorm);
    double fp = dxNorm - delta;
    if (fp <= 0.1 * delta) {
      // the Gauss-Newton step already lies inside the trust region
      lmPar = 0;
      return lmPar;
    }

    // if the jacobian is not rank deficient, the Newton step provides
    // a lower bound, parl, for the zero of the function,
    // otherwise set this bound to zero
    double sum2;
    double parl = 0;
    if (rank == solvedCols) {
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] *= diag[pj] / dxNorm;
      }
      sum2 = 0;
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double sum = 0;
        for (int i = 0; i < j; ++i) {
          sum += weightedJacobian[i][pj] * work1[permutation[i]];
        }
        double s = (work1[pj] - sum) / diagR[pj];
        work1[pj] = s;
        sum2 += s * s;
      }
      parl = fp / (delta * sum2);
    }

    // calculate an upper bound, paru, for the zero of the function
    sum2 = 0;
    for (int j = 0; j < solvedCols; ++j) {
      int pj = permutation[j];
      double sum = 0;
      for (int i = 0; i <= j; ++i) {
        sum += weightedJacobian[i][pj] * qy[i];
      }
      sum /= diag[pj];
      sum2 += sum * sum;
    }
    double gNorm = FastMath.sqrt(sum2);
    double paru = gNorm / delta;
    if (paru == 0) {
      paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
    }

    // if the input par lies outside of the interval (parl,paru),
    // set par to the closer endpoint
    lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
    if (lmPar == 0) {
      lmPar = gNorm / dxNorm;
    }

    // refine lmPar iteratively; the iteration count is capped at 10, as in MINPACK
    for (int countdown = 10; countdown >= 0; --countdown) {

      // evaluate the function at the current value of lmPar
      if (lmPar == 0) {
        lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
      }
      double sPar = FastMath.sqrt(lmPar);
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] = sPar * diag[pj];
      }
      determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

      dxNorm = 0;
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double s = diag[pj] * lmDir[pj];
        work3[pj] = s;
        dxNorm += s * s;
      }
      dxNorm = FastMath.sqrt(dxNorm);
      double previousFP = fp;
      fp = dxNorm - delta;

      // if the function is small enough, accept the current value
      // of lmPar, also test for the exceptional cases where parl is zero
      if (FastMath.abs(fp) <= 0.1 * delta || (parl == 0 && fp <= previousFP && previousFP < 0)) {
        return lmPar;
      }

      // compute the Newton correction
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] = work3[pj] * diag[pj] / dxNorm;
      }
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] /= work2[j];
        double tmp = work1[pj];
        for (int i = j + 1; i < solvedCols; ++i) {
          work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
        }
      }
      sum2 = 0;
      for (int j = 0; j < solvedCols; ++j) {
        double s = work1[permutation[j]];
        sum2 += s * s;
      }
      double correction = fp / (delta * sum2);

      // depending on the sign of the function, update parl or paru.
      if (fp > 0) {
        parl = FastMath.max(parl, lmPar);
      } else if (fp < 0) {
        paru = FastMath.min(paru, lmPar);
      }

      // compute an improved estimate for lmPar
      lmPar = FastMath.max(parl, lmPar + correction);
    }

    return lmPar;
  }
  /** {@inheritDoc} */
  public Optimum optimize(final LeastSquaresProblem problem) {
    // Pull in relevant data from the problem as locals.
    final int nR = problem.getObservationSize(); // Number of observed data.
    final int nC = problem.getParameterSize(); // Number of parameters.
    // Counters.
    final Incrementor iterationCounter = problem.getIterationCounter();
    final Incrementor evaluationCounter = problem.getEvaluationCounter();
    // Convergence criterion.
    final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();

    // arrays shared with the other private methods
    final int solvedCols = FastMath.min(nR, nC);
    /* Parameters evolution direction associated with lmPar. */
    double[] lmDir = new double[nC];
    /* Levenberg-Marquardt parameter. */
    double lmPar = 0;

    // local point
    double delta = 0;
    double xNorm = 0;
    double[] diag = new double[nC];
    double[] oldX = new double[nC];
    double[] oldRes = new double[nR];
    double[] qtf = new double[nR];
    double[] work1 = new double[nC];
    double[] work2 = new double[nC];
    double[] work3 = new double[nC];

    // Evaluate the function at the starting point and calculate its norm.
    evaluationCounter.incrementCount();
    // value will be reassigned in the loop
    Evaluation current = problem.evaluate(problem.getStart());
    double[] currentResiduals = current.getResiduals().toArray();
    double currentCost = current.getCost();
    double[] currentPoint = current.getPoint().toArray();

    // Outer loop.
    boolean firstIteration = true;
    while (true) {
      iterationCounter.incrementCount();

      final Evaluation previous = current;

      // QR decomposition of the jacobian matrix
      final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
      final double[][] weightedJacobian = internalData.weightedJacobian;
      final int[] permutation = internalData.permutation;
      final double[] diagR = internalData.diagR;
      final double[] jacNorm = internalData.jacNorm;

      // residuals already have weights applied
      double[] weightedResidual = currentResiduals;
      for (int i = 0; i < nR; i++) {
        qtf[i] = weightedResidual[i];
      }

      // compute Qt.res
      qTy(qtf, internalData);

      // now we don't need Q anymore,
      // so let jacobian contain the R matrix with its diagonal elements
      for (int k = 0; k < solvedCols; ++k) {
        int pk = permutation[k];
        weightedJacobian[k][pk] = diagR[pk];
      }

      if (firstIteration) {
        // scale the point according to the norms of the columns
        // of the initial jacobian
        xNorm = 0;
        for (int k = 0; k < nC; ++k) {
          double dk = jacNorm[k];
          if (dk == 0) {
            dk = 1.0;
          }
          double xk = dk * currentPoint[k];
          xNorm += xk * xk;
          diag[k] = dk;
        }
        xNorm = FastMath.sqrt(xNorm);

        // initialize the step bound delta
        delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
      }

      // check orthogonality between function vector and jacobian columns
      double maxCosine = 0;
      if (currentCost != 0) {
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          double s = jacNorm[pj];
          if (s != 0) {
            double sum = 0;
            for (int i = 0; i <= j; ++i) {
              sum += weightedJacobian[i][pj] * qtf[i];
            }
            maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
          }
        }
      }
      if (maxCosine <= orthoTolerance) {
        // Convergence has been reached.
        return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
      }

      // rescale if necessary
      for (int j = 0; j < nC; ++j) {
        diag[j] = FastMath.max(diag[j], jacNorm[j]);
      }

      // Inner loop.
      for (double ratio = 0; ratio < 1.0e-4; ) {

        // save the state
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          oldX[pj] = currentPoint[pj];
        }
        final double previousCost = currentCost;
        double[] tmpVec = weightedResidual;
        weightedResidual = oldRes;
        oldRes = tmpVec;

        // determine the Levenberg-Marquardt parameter
        lmPar =
            determineLMParameter(
                qtf, delta, diag, internalData, solvedCols, work1, work2, work3, lmDir, lmPar);

        // compute the new point and the norm of the evolution direction
        double lmNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          lmDir[pj] = -lmDir[pj];
          currentPoint[pj] = oldX[pj] + lmDir[pj];
          double s = diag[pj] * lmDir[pj];
          lmNorm += s * s;
        }
        lmNorm = FastMath.sqrt(lmNorm);
        // on the first iteration, adjust the initial step bound.
        if (firstIteration) {
          delta = FastMath.min(delta, lmNorm);
        }

        // Evaluate the function at x + p and calculate its norm.
        evaluationCounter.incrementCount();
        current = problem.evaluate(new ArrayRealVector(currentPoint));
        currentResiduals = current.getResiduals().toArray();
        currentCost = current.getCost();
        currentPoint = current.getPoint().toArray();

        // compute the scaled actual reduction
        double actRed = -1.0;
        if (0.1 * currentCost < previousCost) {
          double r = currentCost / previousCost;
          actRed = 1.0 - r * r;
        }

        // compute the scaled predicted reduction
        // and the scaled directional derivative
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          double dirJ = lmDir[pj];
          work1[j] = 0;
          for (int i = 0; i <= j; ++i) {
            work1[i] += weightedJacobian[i][pj] * dirJ;
          }
        }
        double coeff1 = 0;
        for (int j = 0; j < solvedCols; ++j) {
          coeff1 += work1[j] * work1[j];
        }
        double pc2 = previousCost * previousCost;
        coeff1 /= pc2;
        double coeff2 = lmPar * lmNorm * lmNorm / pc2;
        double preRed = coeff1 + 2 * coeff2;
        double dirDer = -(coeff1 + coeff2);

        // ratio of the actual to the predicted reduction
        ratio = (preRed == 0) ? 0 : (actRed / preRed);

        // update the step bound
        if (ratio <= 0.25) {
          double tmp = (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
          if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
            tmp = 0.1;
          }
          delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
          lmPar /= tmp;
        } else if ((lmPar == 0) || (ratio >= 0.75)) {
          delta = 2 * lmNorm;
          lmPar *= 0.5;
        }

        // test for successful iteration.
        if (ratio >= 1.0e-4) {
          // successful iteration, update the norm
          firstIteration = false;
          xNorm = 0;
          for (int k = 0; k < nC; ++k) {
            double xK = diag[k] * currentPoint[k];
            xNorm += xK * xK;
          }
          xNorm = FastMath.sqrt(xNorm);

          // tests for convergence.
          if (checker != null
              && checker.converged(iterationCounter.getCount(), previous, current)) {
            return new OptimumImpl(
                current, evaluationCounter.getCount(), iterationCounter.getCount());
          }
        } else {
          // failed iteration, reset the previous values
          currentCost = previousCost;
          for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            currentPoint[pj] = oldX[pj];
          }
          tmpVec = weightedResidual;
          weightedResidual = oldRes;
          oldRes = tmpVec;
          // Reset "current" to previous values.
          current = previous;
        }

        // Default convergence criteria.
        if ((FastMath.abs(actRed) <= costRelativeTolerance
                && preRed <= costRelativeTolerance
                && ratio <= 2.0)
            || delta <= parRelativeTolerance * xNorm) {
          return new OptimumImpl(
              current, evaluationCounter.getCount(), iterationCounter.getCount());
        }

        // tests for termination and stringent tolerances
        if (FastMath.abs(actRed) <= TWO_EPS && preRed <= TWO_EPS && ratio <= 2.0) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE, costRelativeTolerance);
        } else if (delta <= TWO_EPS * xNorm) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE, parRelativeTolerance);
        } else if (maxCosine <= TWO_EPS) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE, orthoTolerance);
        }
      }
    }
  }
  /**
   * Decides whether the traversal result reaching {@code current} should be skipped
   * (pruned) by the search; returning {@code true} discards the state.
   *
   * <p>The check combines a memoized straight-line distance from the current vertex to the
   * real target with walk-distance and time lower bounds, compared against previously found
   * bounding states. NOTE(review): units are presumably meters for distances and seconds
   * for times — confirm against the enclosing class.
   */
  @Override
  public boolean shouldSkipTraversalResult(
      Vertex origin,
      Vertex target,
      State parent,
      State current,
      ShortestPathTree spt,
      RoutingRequest traverseOptions) {
    // No pruning target configured: never skip anything.
    if (realTarget == null) return false;

    final Vertex vertex = current.getVertex();
    int vertexIndex = vertex.getIndex();
    if (vertexIndex < distance.length) {
      if (distance[vertexIndex] > 0.0) {
        // Cache hit: reuse the previously computed distance to the target.
        targetDistance = distance[vertexIndex];
      } else {
        // Cache miss: compute the fast (approximate) distance and memoize it.
        targetDistance =
            distanceLibrary.fastDistance(
                realTargetCoordinate.y, realTargetCoordinate.x, vertex.getY(), vertex.getX());
        distance[vertexIndex] = targetDistance;
        // Track the smallest distance-to-target seen from any transit stop.
        if (vertex instanceof TransitStop && targetDistance < bestTargetDistance) {
          bestTargetDistance = targetDistance;
        }
      }
    } else {
      // Vertex index falls outside the cache array: compute without memoizing.
      targetDistance =
          distanceLibrary.fastDistance(
              realTargetCoordinate.y, realTargetCoordinate.x, vertex.getY(), vertex.getX());
    }

    // Walk budget still available from this state.
    final double remainingWalk = traverseOptions.maxWalkDistance - current.getWalkDistance();
    final double minWalk;
    double minTime = 0;
    if (targetDistance > remainingWalk) {
      // Cannot walk all the way: some transit leg is required, so the unavoidable walk is
      // at least the access walk to transit on both ends, plus a board/alight slack.
      // NOTE(review): both `options` and `traverseOptions` are consulted here — confirm
      // they are expected to refer to the same request.
      minWalk = this.distanceToNearestTransitStop + vertex.getDistanceToNearestTransitStop();
      minTime =
          options.isArriveBy() ? traverseOptions.getAlightSlack() : traverseOptions.getBoardSlack();

      // Prune street states from which no transfer to transit is possible.
      if (current.getBackEdge() instanceof StreetEdge
          && transitLocalStreets != null
          && !transitLocalStreets.transferrable(vertex)) {
        return true;
      }
    } else {
      // could walk directly to destination
      if (targetDistance < distanceToNearestTransitStop
          || transitLocalStreets == null
          || !transitLocalStreets.transferrable(vertex)) minWalk = targetDistance;
      else minWalk = distanceToNearestTransitStop;
    }
    // The minimum unavoidable walk already exceeds the remaining budget: skip.
    if (minWalk > remainingWalk) return true;

    final double optimisticDistance = current.getWalkDistance() + minWalk;

    // Optimistic lower bound on remaining time: walk at the upper-bound speed and cover
    // the rest of the distance at the maximum transit speed.
    final double walkTime = minWalk / speedUpperBound;
    minTime += (targetDistance - minWalk) / Raptor.MAX_TRANSIT_SPEED + walkTime;

    double stateTime = current.getOptimizedElapsedTime() + minTime;

    // Effective walk distance used for domination checks: the optimistic distance either
    // inflated by WALK_EPSILON or padded with the transfer-time equivalent, whichever is larger.
    double walkDistance =
        FastMath.max(
            optimisticDistance * Raptor.WALK_EPSILON,
            optimisticDistance + transferTimeInWalkDistance);

    int i = 0;
    boolean prevBounded = !bounders.isEmpty();
    for (State bounder : bounders) {
      if (removedBoundingStates.contains(bounder)) continue;
      // Weight-based pruning: far worse than a known bounding state.
      if (current.getWeight() + minTime + walkTime * (options.getWalkReluctance() - 1)
          > bounder.getWeight() * WORST_WEIGHT_DIFFERENCE_FACTOR) {
        return true;
      }
      int prevTime = previousArrivalTime.get(i++);

      if (walkDistance > bounder.getWalkDistance()
          && current.getNumBoardings() >= bounder.getNumBoardings()) {
        // Candidate walks farther with at least as many boardings...
        if (current.getElapsedTime() + minTime >= bounder.getElapsedTime()) {
          // ...and cannot be faster: dominated, skip.
          return true;
        } else if (prevTime > 0
            && (options.arriveBy
                ? (current.getTime() - minTime >= prevTime)
                : ((current.getTime() + minTime) <= prevTime))) {
          // Could still beat the previously recorded arrival time: keep the state.
          prevBounded = false;
        }
      } else {
        prevBounded = false;
      }

      // check that the new path is not much longer in time than the bounding path
      if (bounder.getOptimizedElapsedTime() * timeBoundFactor < stateTime) {
        return true;
      }
    }
    // Skip only if every examined bounder dominated this state. NOTE(review): when every
    // bounder is in removedBoundingStates this still returns true — confirm intended.
    return prevBounded;
  }
Esempio n. 26
0
  /**
   * Computes {@code log(Beta(p, q))} for {@code p > 0} and {@code q > 0}. Based on the
   * <em>NSWC Library of Mathematics Subroutines</em> implementation, {@code DBETLN}.
   *
   * @param p First argument.
   * @param q Second argument.
   * @return the value of {@code log(Beta(p, q))}, or {@code NaN} if {@code p <= 0},
   *     {@code q <= 0}, or either argument is {@code NaN}.
   */
  public static double logBeta(final double p, final double q) {
    // Invalid arguments propagate as NaN rather than throwing.
    if (Double.isNaN(p) || Double.isNaN(q) || (p <= 0.0) || (q <= 0.0)) {
      return Double.NaN;
    }

    // Work with the ordered pair: small <= large.
    final double small = FastMath.min(p, q);
    final double large = FastMath.max(p, q);

    if (small >= 10.0) {
      // Both arguments at least 10: Stirling-based asymptotic expansion.
      final double w = sumDeltaMinusDeltaSum(small, large);
      final double h = small / large;
      final double c = h / (1.0 + h);
      final double u = -(small - 0.5) * FastMath.log(c);
      final double v = large * FastMath.log1p(h);
      // Subtract the larger of u, v last to limit cancellation.
      final double base = (-0.5 * FastMath.log(large) + HALF_LOG_TWO_PI) + w;
      return u <= v ? ((base - u) - v) : ((base - v) - u);
    }

    if (small > 2.0) {
      if (large > 1000.0) {
        // Very large second argument: reduce the small one below 2, one unit at a time.
        final int n = (int) FastMath.floor(small - 1.0);
        double product = 1.0;
        double sReduced = small;
        for (int i = 0; i < n; i++) {
          sReduced -= 1.0;
          product *= sReduced / (1.0 + sReduced / large);
        }
        return (FastMath.log(product) - n * FastMath.log(large))
            + (Gamma.logGamma(sReduced) + logGammaMinusLogGammaSum(sReduced, large));
      }
      // Reduce the small argument below 2.
      double product1 = 1.0;
      double sReduced = small;
      while (sReduced > 2.0) {
        sReduced -= 1.0;
        final double h = sReduced / large;
        product1 *= h / (1.0 + h);
      }
      if (large < 10.0) {
        // Moderate second argument: reduce it below 2 as well.
        double product2 = 1.0;
        double lReduced = large;
        while (lReduced > 2.0) {
          lReduced -= 1.0;
          product2 *= lReduced / (sReduced + lReduced);
        }
        return FastMath.log(product1)
            + FastMath.log(product2)
            + (Gamma.logGamma(sReduced)
                + (Gamma.logGamma(lReduced) - logGammaSum(sReduced, lReduced)));
      }
      return FastMath.log(product1) + Gamma.logGamma(sReduced) + logGammaMinusLogGammaSum(sReduced, large);
    }

    if (small >= 1.0) {
      if (large <= 2.0) {
        // Both arguments in [1, 2]: direct log-gamma evaluation.
        return Gamma.logGamma(small) + Gamma.logGamma(large) - logGammaSum(small, large);
      }
      if (large >= 10.0) {
        return Gamma.logGamma(small) + logGammaMinusLogGammaSum(small, large);
      }
      // 2 < large < 10: reduce only the large argument below 2.
      double product = 1.0;
      double lReduced = large;
      while (lReduced > 2.0) {
        lReduced -= 1.0;
        product *= lReduced / (small + lReduced);
      }
      return FastMath.log(product)
          + (Gamma.logGamma(small) + (Gamma.logGamma(lReduced) - logGammaSum(small, lReduced)));
    }

    // 0 < small < 1.
    if (large >= 10.0) {
      return Gamma.logGamma(small) + logGammaMinusLogGammaSum(small, large);
    }
    // The direct ratio of gamma values is more accurate here than the original NSWC
    // formulation logGamma(a) + (logGamma(b) - logGamma(a + b)).
    return FastMath.log(Gamma.gamma(small) * Gamma.gamma(large) / Gamma.gamma(small + large));
  }
Esempio n. 27
0
 /** Returns the larger of the two arguments, delegating to {@code FastMath.max(double, double)}. */
 public double value(double x, double y) {
   return FastMath.max(x, y);
 }