Example 1
  /* (non-Javadoc)
   * @see net.finmath.stochastic.RandomVariableInterface#cap(net.finmath.stochastic.RandomVariableInterface)
   */
  public RandomVariableInterface cap(RandomVariableInterface randomVariable) {
    // Set the time of this random variable to the maximum of the two filtration times,
    // i.e. the latest time with respect to which measurability is known.
    double newTime = Math.max(time, randomVariable.getFiltrationTime());

    if (isDeterministic() && randomVariable.isDeterministic()) {
      double newValueIfNonStochastic = FastMath.min(valueIfNonStochastic, randomVariable.get(0));
      return new RandomVariable(newTime, newValueIfNonStochastic);
    } else if (isDeterministic()) {
      return randomVariable.cap(this);
    } else {
      double[] newRealizations = new double[Math.max(size(), randomVariable.size())];
      for (int i = 0; i < newRealizations.length; i++) {
        newRealizations[i] = FastMath.min(realizations[i], randomVariable.get(i));
      }
      return new RandomVariable(newTime, newRealizations);
    }
  }
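A minimal usage sketch of cap (a pointwise minimum), with hypothetical values; the two RandomVariable constructors are the ones already used above:

  RandomVariableInterface bound = new RandomVariable(0.0, 2.0);                   // deterministic 2.0
  RandomVariableInterface x = new RandomVariable(1.0, new double[] {1.0, 3.0});   // two realizations
  RandomVariableInterface capped = x.cap(bound);  // realizations {1.0, 2.0}, filtration time 1.0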
Example 2
  /**
   * Get the lightning ratio (in [0, 1]).
   *
   * @param position the satellite's position in the selected frame.
   * @param frame the frame in which the position is defined
   * @param date the date
   * @return lightning ratio
   * @exception OrekitException if the trajectory is inside the Earth
   */
  public double getLightningRatio(
      final Vector3D position, final Frame frame, final AbsoluteDate date) throws OrekitException {

    // Compute useful angles
    final double[] angle = getEclipseAngles(position, frame, date);

    // Sat-Sun / Sat-CentralBody angle
    final double sunEarthAngle = angle[0];

    // Central Body apparent radius
    final double alphaCentral = angle[1];

    // Sun apparent radius
    final double alphaSun = angle[2];

    double result = 1.0;

    // Is the satellite in complete umbra?
    if (sunEarthAngle - alphaCentral + alphaSun <= 0.0) {
      result = 0.0;
    } else if (sunEarthAngle - alphaCentral - alphaSun < 0.0) {
      // Compute a lightning ratio in penumbra
      final double sEA2 = sunEarthAngle * sunEarthAngle;
      final double oo2sEA = 1.0 / (2. * sunEarthAngle);
      final double aS2 = alphaSun * alphaSun;
      final double aE2 = alphaCentral * alphaCentral;
      final double aE2maS2 = aE2 - aS2;

      final double alpha1 = (sEA2 - aE2maS2) * oo2sEA;
      final double alpha2 = (sEA2 + aE2maS2) * oo2sEA;

      // Protection against numerical inaccuracy at boundaries
      final double a1oaS = FastMath.min(1.0, FastMath.max(-1.0, alpha1 / alphaSun));
      final double aS2ma12 = FastMath.max(0.0, aS2 - alpha1 * alpha1);
      final double a2oaE = FastMath.min(1.0, FastMath.max(-1.0, alpha2 / alphaCentral));
      final double aE2ma22 = FastMath.max(0.0, aE2 - alpha2 * alpha2);

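      // P1 and P2 are the circular-segment areas cut from the Sun disk and the
      // central body disk by their common chord; P1 + P2 is the lens-shaped overlap
      // of the two apparent disks (standard two-circle intersection formula).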
      final double P1 = aS2 * FastMath.acos(a1oaS) - alpha1 * FastMath.sqrt(aS2ma12);
      final double P2 = aE2 * FastMath.acos(a2oaE) - alpha2 * FastMath.sqrt(aE2ma22);

      result = 1. - (P1 + P2) / (FastMath.PI * aS2);
    }

    return result;
  }
Example 3
  /**
   * Create the objects needed for linear transformation.
   *
   * <p>Each {@link org.orekit.propagation.semianalytical.dsst.utilities.hansen.HansenTesseralLinear
   * HansenTesseralLinear} uses a fixed value for s and j. Since j varies from -maxJ to +maxJ and s
   * varies from -maxDegree to +maxDegree, a (2 * maxDegree + 1) x (2 * maxJ + 1) matrix of objects
   * should be created. The size of this matrix can be reduced by taking into account the expression
   * (2.7.3-2). This means that it is enough to create the objects for positive values of j and all
   * values of s.
   *
   * @param meanOnly create only the objects required for the mean contribution
   */
  private void createHansenObjects(final boolean meanOnly) {
    // Allocate the two dimensional array
    this.hansenObjects = new HansenTesseralLinear[2 * maxDegree + 1][jMax + 1];
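    // Row index is s + maxDegree (s in [-maxDegree, maxDegree]); only non-negative j
    // columns are allocated, negative j being recovered through the (2.7.3-2) symmetry.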

    if (meanOnly) {
      // loop through the resonant orders
      for (int m : resOrders) {
        // Compute the corresponding j term
        final int j = FastMath.max(1, (int) FastMath.round(ratio * m));

        // Compute the sMin and sMax values
        final int sMin = FastMath.min(maxEccPow - j, maxDegree);
        final int sMax = FastMath.min(maxEccPow + j, maxDegree);

        // loop through the s values
        for (int s = 0; s <= sMax; s++) {
          // Compute the n0 value
          final int n0 = FastMath.max(FastMath.max(2, m), s);

          // Create the object for the pair j,s
          this.hansenObjects[s + maxDegree][j] =
              new HansenTesseralLinear(maxDegree, s, j, n0, maxHansen);

          if (s > 0 && s <= sMin) {
            // Also create the object for the pair j, -s
            this.hansenObjects[maxDegree - s][j] =
                new HansenTesseralLinear(maxDegree, -s, j, n0, maxHansen);
          }
        }
      }
    } else {
      // create all objects
      for (int j = 0; j <= jMax; j++) {
        for (int s = -maxDegree; s <= maxDegree; s++) {
          // Compute the n0 value
          final int n0 = FastMath.max(2, FastMath.abs(s));

          this.hansenObjects[s + maxDegree][j] =
              new HansenTesseralLinear(maxDegree, s, j, n0, maxHansen);
        }
      }
    }
  }
Example 4
  /** {@inheritDoc} */
  @Override
  public void initialize(final AuxiliaryElements aux, final boolean meanOnly)
      throws OrekitException {

    // Keplerian period
    orbitPeriod = aux.getKeplerianPeriod();

    // orbit frame
    frame = aux.getFrame();

    // Set the highest power of the eccentricity in the analytical power
    // series expansion for the averaged high order resonant central body
    // spherical harmonic perturbation
    final double e = aux.getEcc();
    if (e <= 0.005) {
      maxEccPow = 3;
    } else if (e <= 0.02) {
      maxEccPow = 4;
    } else if (e <= 0.1) {
      maxEccPow = 7;
    } else if (e <= 0.2) {
      maxEccPow = 10;
    } else if (e <= 0.3) {
      maxEccPow = 12;
    } else if (e <= 0.4) {
      maxEccPow = 15;
    } else {
      maxEccPow = 20;
    }

    // Set the maximum power of the eccentricity to use in Hansen coefficient Kernel expansion.
    maxHansen = maxEccPow / 2;
    jMax = FastMath.min(MAXJ, maxDegree + maxEccPow);

    // Ratio of satellite to central body periods to define resonant terms
    ratio = orbitPeriod / bodyPeriod;

    // Compute the resonant tesseral harmonic terms if not set by the user
    getResonantAndNonResonantTerms(meanOnly);

    // initialize the HansenTesseralLinear objects needed
    createHansenObjects(meanOnly);

    if (!meanOnly) {
      // Initialize the Tesseral Short Periodics coefficient class
      tesseralSPCoefs =
          new TesseralShortPeriodicCoefficients(
              jMax,
              FastMath.max(maxOrderTesseralSP, maxOrderMdailyTesseralSP),
              INTERPOLATION_POINTS);
    }
  }
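For example, an eccentricity e = 0.05 falls in the (0.02, 0.1] band above, so maxEccPow = 7, maxHansen = 7 / 2 = 3 (integer division), and jMax = min(MAXJ, maxDegree + 7).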
Example 5
 @Test
 public void testNextLongWideRange() {
   long lower = -0x6543210FEDCBA987L;
   long upper = 0x456789ABCDEF0123L;
   long max = Long.MIN_VALUE;
   long min = Long.MAX_VALUE;
   for (int i = 0; i < 10000000; ++i) {
     long r = randomData.nextLong(lower, upper);
     max = FastMath.max(max, r);
     min = FastMath.min(min, r);
     Assert.assertTrue(r >= lower);
     Assert.assertTrue(r <= upper);
   }
   double ratio = (((double) max) - ((double) min)) / (((double) upper) - ((double) lower));
   Assert.assertTrue(ratio > 0.99999);
 }
Example 6
 public static int calculateCityLoyalty(int tax_rate, EfsIni efs_ini, Game game) {
   int excom_penalty = 0;
    if (game.getDiplomacy().getDiplomaticState(game.getTurn(), C.THE_CHURCH) == C.DS_WAR) {
      excom_penalty = game.getEfs_ini().excom_peasant_loyalty_hit;
    }
   return FastMath.max(
       0,
       FastMath.min(
           100, 100 - (tax_rate - efs_ini.default_tax_rate) * C.TAX_LOYALTY_HIT - excom_penalty));
 }
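As a worked example with hypothetical constants (default_tax_rate = 20, C.TAX_LOYALTY_HIT = 2): a tax_rate of 35 gives 100 - (35 - 20) * 2 = 70, an excommunication penalty of 10 would lower that to 60, and the min/max pair clamps the final loyalty into [0, 100].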
Example 7
 @Test
 public void testNextIntWideRange() {
   int lower = -0x6543210F;
   int upper = 0x456789AB;
   int max = Integer.MIN_VALUE;
   int min = Integer.MAX_VALUE;
   for (int i = 0; i < 1000000; ++i) {
     int r = randomData.nextInt(lower, upper);
     max = FastMath.max(max, r);
     min = FastMath.min(min, r);
     Assert.assertTrue(r >= lower);
     Assert.assertTrue(r <= upper);
   }
   double ratio = (((double) max) - ((double) min)) / (((double) upper) - ((double) lower));
   Assert.assertTrue(ratio > 0.99999);
 }
Example 8
  /**
   * Returns the value of Δ(p) + Δ(q) - Δ(p + q), with p, q ≥ 10. Based on the <em>NSWC Library of
   * Mathematics Subroutines</em> double precision implementation, {@code DBCORR}. In {@code
   * BetaTest.testSumDeltaMinusDeltaSum()}, this private method is accessed through reflection.
   *
   * @param p First argument.
   * @param q Second argument.
   * @return the value of {@code Delta(p) + Delta(q) - Delta(p + q)}.
   * @throws NumberIsTooSmallException if {@code p < 10.0} or {@code q < 10.0}.
   */
  private static double sumDeltaMinusDeltaSum(final double p, final double q) {

    if (p < 10.0) {
      throw new NumberIsTooSmallException(p, 10.0, true);
    }
    if (q < 10.0) {
      throw new NumberIsTooSmallException(q, 10.0, true);
    }

    final double a = FastMath.min(p, q);
    final double b = FastMath.max(p, q);
    final double sqrtT = 10.0 / a;
    final double t = sqrtT * sqrtT;
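    // Evaluate the correction series sum(DELTA[i] * t^i) by Horner's scheme.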
    double z = DELTA[DELTA.length - 1];
    for (int i = DELTA.length - 2; i >= 0; i--) {
      z = t * z + DELTA[i];
    }
    return z / a + deltaMinusDeltaSum(a, b);
  }
Example 9
  /**
   * Single constructor.
   *
   * @param centralBodyFrame rotating body frame
   * @param centralBodyRotationRate central body rotation rate (rad/s)
   * @param provider provider for spherical harmonics
   * @param mDailiesOnly if true only M-dailies tesseral harmonics are taken into account for short
   *     periodics
   */
  TesseralContribution(
      final Frame centralBodyFrame,
      final double centralBodyRotationRate,
      final UnnormalizedSphericalHarmonicsProvider provider,
      final boolean mDailiesOnly) {

    // Central body rotating frame
    this.bodyFrame = centralBodyFrame;

    // Save the rotation rate
    this.centralBodyRotationRate = centralBodyRotationRate;

    // Central body rotation period in seconds
    this.bodyPeriod = MathUtils.TWO_PI / centralBodyRotationRate;

    // Provider for spherical harmonics
    this.provider = provider;
    this.maxDegree = provider.getMaxDegree();
    this.maxOrder = provider.getMaxOrder();

    // set the maximum degree and order for short periodics
    this.maxDegreeTesseralSP = FastMath.min(maxDegree, MAX_DEGREE_TESSERAL_SP);
    this.maxDegreeMdailyTesseralSP = FastMath.min(maxDegree, MAX_DEGREE_MDAILY_TESSERAL_SP);
    this.maxOrderTesseralSP = FastMath.min(maxOrder, MAX_ORDER_TESSERAL_SP);
    this.maxOrderMdailyTesseralSP = FastMath.min(maxOrder, MAX_ORDER_MDAILY_TESSERAL_SP);

    // set the maximum value for eccentricity power
    this.maxEccPowTesseralSP = MAX_ECCPOWER_SP;
    this.maxEccPowMdailyTesseralSP = FastMath.min(maxDegreeMdailyTesseralSP - 2, MAX_ECCPOWER_SP);
    this.jMax = FastMath.min(MAXJ, maxDegreeTesseralSP + maxEccPowTesseralSP);

    // M-dailies only
    this.mDailiesOnly = mDailiesOnly;

    // Initialize default values
    this.resOrders = new ArrayList<Integer>();
    this.nonResOrders = new TreeMap<Integer, List<Integer>>();
    this.maxEccPow = 0;
    this.maxHansen = 0;

    // Factorials computation
    final int maxFact = 2 * maxDegree + 1;
    this.fact = new double[maxFact];
    fact[0] = 1;
    for (int i = 1; i < maxFact; i++) {
      fact[i] = i * fact[i - 1];
    }
  }
Example 10
  /**
   * Calculates the compact Singular Value Decomposition of the given matrix.
   *
   * @param matrix Matrix to decompose.
   */
  public SingularValueDecomposition(final RealMatrix matrix) {
    final double[][] A;

    // "m" is always the largest dimension.
    if (matrix.getRowDimension() < matrix.getColumnDimension()) {
      transposed = true;
      A = matrix.transpose().getData();
      m = matrix.getColumnDimension();
      n = matrix.getRowDimension();
    } else {
      transposed = false;
      A = matrix.getData();
      m = matrix.getRowDimension();
      n = matrix.getColumnDimension();
    }

    singularValues = new double[n];
    final double[][] U = new double[m][n];
    final double[][] V = new double[n][n];
    final double[] e = new double[n];
    final double[] work = new double[m];
    // Reduce A to bidiagonal form, storing the diagonal elements
    // in s and the super-diagonal elements in e.
    final int nct = FastMath.min(m - 1, n);
    final int nrt = FastMath.max(0, n - 2);
    for (int k = 0; k < FastMath.max(nct, nrt); k++) {
      if (k < nct) {
        // Compute the transformation for the k-th column and
        // place the k-th diagonal in s[k].
        // Compute 2-norm of k-th column without under/overflow.
        singularValues[k] = 0;
        for (int i = k; i < m; i++) {
          singularValues[k] = FastMath.hypot(singularValues[k], A[i][k]);
        }
        if (singularValues[k] != 0) {
          if (A[k][k] < 0) {
            singularValues[k] = -singularValues[k];
          }
          for (int i = k; i < m; i++) {
            A[i][k] /= singularValues[k];
          }
          A[k][k] += 1;
        }
        singularValues[k] = -singularValues[k];
      }
      for (int j = k + 1; j < n; j++) {
        if (k < nct && singularValues[k] != 0) {
          // Apply the transformation.
          double t = 0;
          for (int i = k; i < m; i++) {
            t += A[i][k] * A[i][j];
          }
          t = -t / A[k][k];
          for (int i = k; i < m; i++) {
            A[i][j] += t * A[i][k];
          }
        }
        // Place the k-th row of A into e for the
        // subsequent calculation of the row transformation.
        e[j] = A[k][j];
      }
      if (k < nct) {
        // Place the transformation in U for subsequent back
        // multiplication.
        for (int i = k; i < m; i++) {
          U[i][k] = A[i][k];
        }
      }
      if (k < nrt) {
        // Compute the k-th row transformation and place the
        // k-th super-diagonal in e[k].
        // Compute 2-norm without under/overflow.
        e[k] = 0;
        for (int i = k + 1; i < n; i++) {
          e[k] = FastMath.hypot(e[k], e[i]);
        }
        if (e[k] != 0) {
          if (e[k + 1] < 0) {
            e[k] = -e[k];
          }
          for (int i = k + 1; i < n; i++) {
            e[i] /= e[k];
          }
          e[k + 1] += 1;
        }
        e[k] = -e[k];
        if (k + 1 < m && e[k] != 0) {
          // Apply the transformation.
          for (int i = k + 1; i < m; i++) {
            work[i] = 0;
          }
          for (int j = k + 1; j < n; j++) {
            for (int i = k + 1; i < m; i++) {
              work[i] += e[j] * A[i][j];
            }
          }
          for (int j = k + 1; j < n; j++) {
            final double t = -e[j] / e[k + 1];
            for (int i = k + 1; i < m; i++) {
              A[i][j] += t * work[i];
            }
          }
        }

        // Place the transformation in V for subsequent
        // back multiplication.
        for (int i = k + 1; i < n; i++) {
          V[i][k] = e[i];
        }
      }
    }
    // Set up the final bidiagonal matrix of order p.
    int p = n;
    if (nct < n) {
      singularValues[nct] = A[nct][nct];
    }
    if (m < p) {
      singularValues[p - 1] = 0;
    }
    if (nrt + 1 < p) {
      e[nrt] = A[nrt][p - 1];
    }
    e[p - 1] = 0;

    // Generate U.
    for (int j = nct; j < n; j++) {
      for (int i = 0; i < m; i++) {
        U[i][j] = 0;
      }
      U[j][j] = 1;
    }
    for (int k = nct - 1; k >= 0; k--) {
      if (singularValues[k] != 0) {
        for (int j = k + 1; j < n; j++) {
          double t = 0;
          for (int i = k; i < m; i++) {
            t += U[i][k] * U[i][j];
          }
          t = -t / U[k][k];
          for (int i = k; i < m; i++) {
            U[i][j] += t * U[i][k];
          }
        }
        for (int i = k; i < m; i++) {
          U[i][k] = -U[i][k];
        }
        U[k][k] = 1 + U[k][k];
        for (int i = 0; i < k - 1; i++) {
          U[i][k] = 0;
        }
      } else {
        for (int i = 0; i < m; i++) {
          U[i][k] = 0;
        }
        U[k][k] = 1;
      }
    }

    // Generate V.
    for (int k = n - 1; k >= 0; k--) {
      if (k < nrt && e[k] != 0) {
        for (int j = k + 1; j < n; j++) {
          double t = 0;
          for (int i = k + 1; i < n; i++) {
            t += V[i][k] * V[i][j];
          }
          t = -t / V[k + 1][k];
          for (int i = k + 1; i < n; i++) {
            V[i][j] += t * V[i][k];
          }
        }
      }
      for (int i = 0; i < n; i++) {
        V[i][k] = 0;
      }
      V[k][k] = 1;
    }

    // Main iteration loop for the singular values.
    final int pp = p - 1;
    int iter = 0;
    while (p > 0) {
      int k;
      int kase;
      // Here is where a test for too many iterations would go.
      // This section of the program inspects for
      // negligible elements in the s and e arrays.  On
      // completion the variables kase and k are set as follows.
      // kase = 1     if s(p) and e[k-1] are negligible and k<p
      // kase = 2     if s(k) is negligible and k<p
      // kase = 3     if e[k-1] is negligible, k<p, and
      //              s(k), ..., s(p) are not negligible (qr step).
      // kase = 4     if e(p-1) is negligible (convergence).
      for (k = p - 2; k >= 0; k--) {
        final double threshold =
            TINY + EPS * (FastMath.abs(singularValues[k]) + FastMath.abs(singularValues[k + 1]));

        // the following condition is written this way in order
        // to break out of the loop when NaN occurs, writing it
        // as "if (FastMath.abs(e[k]) <= threshold)" would loop
        // indefinitely in case of NaNs because comparisons with NaN
        // always return false, regardless of what is checked
        // see issue MATH-947
        if (!(FastMath.abs(e[k]) > threshold)) {
          e[k] = 0;
          break;
        }
      }

      if (k == p - 2) {
        kase = 4;
      } else {
        int ks;
        for (ks = p - 1; ks >= k; ks--) {
          if (ks == k) {
            break;
          }
          final double t =
              (ks != p ? FastMath.abs(e[ks]) : 0) + (ks != k + 1 ? FastMath.abs(e[ks - 1]) : 0);
          if (FastMath.abs(singularValues[ks]) <= TINY + EPS * t) {
            singularValues[ks] = 0;
            break;
          }
        }
        if (ks == k) {
          kase = 3;
        } else if (ks == p - 1) {
          kase = 1;
        } else {
          kase = 2;
          k = ks;
        }
      }
      k++;
      // Perform the task indicated by kase.
      switch (kase) {
          // Deflate negligible s(p).
        case 1:
          {
            double f = e[p - 2];
            e[p - 2] = 0;
            for (int j = p - 2; j >= k; j--) {
              double t = FastMath.hypot(singularValues[j], f);
              final double cs = singularValues[j] / t;
              final double sn = f / t;
              singularValues[j] = t;
              if (j != k) {
                f = -sn * e[j - 1];
                e[j - 1] = cs * e[j - 1];
              }

              for (int i = 0; i < n; i++) {
                t = cs * V[i][j] + sn * V[i][p - 1];
                V[i][p - 1] = -sn * V[i][j] + cs * V[i][p - 1];
                V[i][j] = t;
              }
            }
          }
          break;
          // Split at negligible s(k).
        case 2:
          {
            double f = e[k - 1];
            e[k - 1] = 0;
            for (int j = k; j < p; j++) {
              double t = FastMath.hypot(singularValues[j], f);
              final double cs = singularValues[j] / t;
              final double sn = f / t;
              singularValues[j] = t;
              f = -sn * e[j];
              e[j] = cs * e[j];

              for (int i = 0; i < m; i++) {
                t = cs * U[i][j] + sn * U[i][k - 1];
                U[i][k - 1] = -sn * U[i][j] + cs * U[i][k - 1];
                U[i][j] = t;
              }
            }
          }
          break;
          // Perform one qr step.
        case 3:
          {
            // Calculate the shift.
            final double maxPm1Pm2 =
                FastMath.max(
                    FastMath.abs(singularValues[p - 1]), FastMath.abs(singularValues[p - 2]));
            final double scale =
                FastMath.max(
                    FastMath.max(
                        FastMath.max(maxPm1Pm2, FastMath.abs(e[p - 2])),
                        FastMath.abs(singularValues[k])),
                    FastMath.abs(e[k]));
            final double sp = singularValues[p - 1] / scale;
            final double spm1 = singularValues[p - 2] / scale;
            final double epm1 = e[p - 2] / scale;
            final double sk = singularValues[k] / scale;
            final double ek = e[k] / scale;
            final double b = ((spm1 + sp) * (spm1 - sp) + epm1 * epm1) / 2.0;
            final double c = (sp * epm1) * (sp * epm1);
            double shift = 0;
            if (b != 0 || c != 0) {
              shift = FastMath.sqrt(b * b + c);
              if (b < 0) {
                shift = -shift;
              }
              shift = c / (b + shift);
            }
            double f = (sk + sp) * (sk - sp) + shift;
            double g = sk * ek;
            // Chase zeros.
            for (int j = k; j < p - 1; j++) {
              double t = FastMath.hypot(f, g);
              double cs = f / t;
              double sn = g / t;
              if (j != k) {
                e[j - 1] = t;
              }
              f = cs * singularValues[j] + sn * e[j];
              e[j] = cs * e[j] - sn * singularValues[j];
              g = sn * singularValues[j + 1];
              singularValues[j + 1] = cs * singularValues[j + 1];

              for (int i = 0; i < n; i++) {
                t = cs * V[i][j] + sn * V[i][j + 1];
                V[i][j + 1] = -sn * V[i][j] + cs * V[i][j + 1];
                V[i][j] = t;
              }
              t = FastMath.hypot(f, g);
              cs = f / t;
              sn = g / t;
              singularValues[j] = t;
              f = cs * e[j] + sn * singularValues[j + 1];
              singularValues[j + 1] = -sn * e[j] + cs * singularValues[j + 1];
              g = sn * e[j + 1];
              e[j + 1] = cs * e[j + 1];
              if (j < m - 1) {
                for (int i = 0; i < m; i++) {
                  t = cs * U[i][j] + sn * U[i][j + 1];
                  U[i][j + 1] = -sn * U[i][j] + cs * U[i][j + 1];
                  U[i][j] = t;
                }
              }
            }
            e[p - 2] = f;
            iter = iter + 1;
          }
          break;
          // Convergence.
        default:
          {
            // Make the singular values positive.
            if (singularValues[k] <= 0) {
              singularValues[k] = singularValues[k] < 0 ? -singularValues[k] : 0;

              for (int i = 0; i <= pp; i++) {
                V[i][k] = -V[i][k];
              }
            }
            // Order the singular values.
            while (k < pp) {
              if (singularValues[k] >= singularValues[k + 1]) {
                break;
              }
              double t = singularValues[k];
              singularValues[k] = singularValues[k + 1];
              singularValues[k + 1] = t;
              if (k < n - 1) {
                for (int i = 0; i < n; i++) {
                  t = V[i][k + 1];
                  V[i][k + 1] = V[i][k];
                  V[i][k] = t;
                }
              }
              if (k < m - 1) {
                for (int i = 0; i < m; i++) {
                  t = U[i][k + 1];
                  U[i][k + 1] = U[i][k];
                  U[i][k] = t;
                }
              }
              k++;
            }
            iter = 0;
            p--;
          }
          break;
      }
    }

    // Set the small value tolerance used to calculate rank and pseudo-inverse
    tol = FastMath.max(m * singularValues[0] * EPS, FastMath.sqrt(Precision.SAFE_MIN));

    if (!transposed) {
      cachedU = MatrixUtils.createRealMatrix(U);
      cachedV = MatrixUtils.createRealMatrix(V);
    } else {
      cachedU = MatrixUtils.createRealMatrix(V);
      cachedV = MatrixUtils.createRealMatrix(U);
    }
  }
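A sketch of how the tolerance computed above typically yields the numerical rank: count the singular values larger than tol (this matches the convention used by Commons Math's getRank()):

  int rank = 0;
  for (double sv : singularValues) {
    if (sv > tol) {
      rank++;  // singular values at or below tol are treated as numerically zero
    }
  }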
Example 11
  /** {@inheritDoc} */
  @Override
  public void integrate(final ExpandableStatefulODE equations, final double t)
      throws MathIllegalStateException, MathIllegalArgumentException {

    sanityChecks(equations, t);
    setEquations(equations);
    final boolean forward = t > equations.getTime();

    // create some internal working arrays
    final double[] y0 = equations.getCompleteState();
    final double[] y = y0.clone();
    final int stages = c.length + 1;
    final double[][] yDotK = new double[stages][y.length];
    final double[] yTmp = y0.clone();
    final double[] yDotTmp = new double[y.length];

    // set up an interpolator sharing the integrator arrays
    final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
    interpolator.reinitialize(
        this, yTmp, yDotK, forward, equations.getPrimaryMapper(), equations.getSecondaryMappers());
    interpolator.storeTime(equations.getTime());

    // set up integration control objects
    stepStart = equations.getTime();
    double hNew = 0;
    boolean firstTime = true;
    initIntegration(equations.getTime(), y0, t);

    // main integration loop
    isLastStep = false;
    do {

      interpolator.shift();

      // iterate over step size, ensuring local normalized error is smaller than 1
      double error = 10;
      while (error >= 1.0) {

        if (firstTime || !fsal) {
          // first stage
          computeDerivatives(stepStart, y, yDotK[0]);
        }

        if (firstTime) {
          final double[] scale = new double[mainSetDimension];
          if (vecAbsoluteTolerance == null) {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
            }
          } else {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
            }
          }
          hNew = initializeStep(forward, getOrder(), scale, stepStart, y, yDotK[0], yTmp, yDotK[1]);
          firstTime = false;
        }

        stepSize = hNew;
        if (forward) {
          if (stepStart + stepSize >= t) {
            stepSize = t - stepStart;
          }
        } else {
          if (stepStart + stepSize <= t) {
            stepSize = t - stepStart;
          }
        }

        // next stages
        for (int k = 1; k < stages; ++k) {

          for (int j = 0; j < y0.length; ++j) {
            double sum = a[k - 1][0] * yDotK[0][j];
            for (int l = 1; l < k; ++l) {
              sum += a[k - 1][l] * yDotK[l][j];
            }
            yTmp[j] = y[j] + stepSize * sum;
          }

          computeDerivatives(stepStart + c[k - 1] * stepSize, yTmp, yDotK[k]);
        }

        // estimate the state at the end of the step
        for (int j = 0; j < y0.length; ++j) {
          double sum = b[0] * yDotK[0][j];
          for (int l = 1; l < stages; ++l) {
            sum += b[l] * yDotK[l][j];
          }
          yTmp[j] = y[j] + stepSize * sum;
        }

        // estimate the error at the end of the step
        error = estimateError(yDotK, y, yTmp, stepSize);
        if (error >= 1.0) {
          // reject the step and attempt to reduce error by stepsize control
          final double factor =
              FastMath.min(
                  maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
          hNew = filterStep(stepSize * factor, forward, false);
        }
      }

      // local error is small enough: accept the step, trigger events and step handlers
      interpolator.storeTime(stepStart + stepSize);
      System.arraycopy(yTmp, 0, y, 0, y0.length);
      System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
      stepStart = acceptStep(interpolator, y, yDotTmp, t);
      System.arraycopy(y, 0, yTmp, 0, y.length);

      if (!isLastStep) {

        // prepare next step
        interpolator.storeTime(stepStart);

        if (fsal) {
          // save the last evaluation for the next step
          System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
        }

        // stepsize control for next step
        final double factor =
            FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
        final double scaledH = stepSize * factor;
        final double nextT = stepStart + scaledH;
        final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
        hNew = filterStep(scaledH, forward, nextIsLast);

        final double filteredNextT = stepStart + hNew;
        final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
        if (filteredNextIsLast) {
          hNew = t - stepStart;
        }
      }

    } while (!isLastStep);

    // dispatch results
    equations.setTime(stepStart);
    equations.setCompleteState(y);

    resetInternalState();
  }
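The step-size updates above use the classical embedded Runge-Kutta controller factor min(maxGrowth, max(minReduction, safety * error^exp)). A numerical sketch, assuming exp = -1/order as in the Commons Math embedded integrators:

  // order 5 => exp = -0.2; safety = 0.9
  double shrink = 0.9 * Math.pow(2.0, -0.2);  // error = 2.0 (rejected step): factor ~0.78
  double grow = 0.9 * Math.pow(0.5, -0.2);    // error = 0.5 (accepted step): factor ~1.03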
Example 12
  /**
   * Determines the Levenberg-Marquardt parameter.
   *
   * <p>This implementation is a translation in Java of the MINPACK <a
   * href="http://www.netlib.org/minpack/lmpar.f">lmpar</a> routine.
   *
   * <p>This method sets the lmPar and lmDir attributes.
   *
   * <p>The authors of the original Fortran function are:
   *
   * <ul>
   *   <li>Argonne National Laboratory. MINPACK project. March 1980
   *   <li>Burton S. Garbow
   *   <li>Kenneth E. Hillstrom
   *   <li>Jorge J. More
   * </ul>
   *
   * <p>Luc Maisonobe did the Java translation.
   *
   * @param qy Array containing qTy.
   * @param delta Upper bound on the Euclidean norm of diagR * lmDir.
   * @param diag Diagonal matrix.
   * @param internalData Data (modified in-place in this method).
   * @param solvedCols Number of solved points.
   * @param work1 Work array.
   * @param work2 Work array.
   * @param work3 Work array.
   * @param lmDir the "returned" LM direction will be stored in this array.
   * @param lmPar the value of the LM parameter from the previous iteration.
   * @return the new LM parameter
   */
  private double determineLMParameter(
      double[] qy,
      double delta,
      double[] diag,
      InternalData internalData,
      int solvedCols,
      double[] work1,
      double[] work2,
      double[] work3,
      double[] lmDir,
      double lmPar) {
    final double[][] weightedJacobian = internalData.weightedJacobian;
    final int[] permutation = internalData.permutation;
    final int rank = internalData.rank;
    final double[] diagR = internalData.diagR;

    final int nC = weightedJacobian[0].length;

    // compute and store in x the gauss-newton direction, if the
    // jacobian is rank-deficient, obtain a least squares solution
    for (int j = 0; j < rank; ++j) {
      lmDir[permutation[j]] = qy[j];
    }
    for (int j = rank; j < nC; ++j) {
      lmDir[permutation[j]] = 0;
    }
    for (int k = rank - 1; k >= 0; --k) {
      int pk = permutation[k];
      double ypk = lmDir[pk] / diagR[pk];
      for (int i = 0; i < k; ++i) {
        lmDir[permutation[i]] -= ypk * weightedJacobian[i][pk];
      }
      lmDir[pk] = ypk;
    }

    // evaluate the function at the origin, and test
    // for acceptance of the Gauss-Newton direction
    double dxNorm = 0;
    for (int j = 0; j < solvedCols; ++j) {
      int pj = permutation[j];
      double s = diag[pj] * lmDir[pj];
      work1[pj] = s;
      dxNorm += s * s;
    }
    dxNorm = FastMath.sqrt(dxNorm);
    double fp = dxNorm - delta;
    if (fp <= 0.1 * delta) {
      lmPar = 0;
      return lmPar;
    }

    // if the jacobian is not rank deficient, the Newton step provides
    // a lower bound, parl, for the zero of the function,
    // otherwise set this bound to zero
    double sum2;
    double parl = 0;
    if (rank == solvedCols) {
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] *= diag[pj] / dxNorm;
      }
      sum2 = 0;
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double sum = 0;
        for (int i = 0; i < j; ++i) {
          sum += weightedJacobian[i][pj] * work1[permutation[i]];
        }
        double s = (work1[pj] - sum) / diagR[pj];
        work1[pj] = s;
        sum2 += s * s;
      }
      parl = fp / (delta * sum2);
    }

    // calculate an upper bound, paru, for the zero of the function
    sum2 = 0;
    for (int j = 0; j < solvedCols; ++j) {
      int pj = permutation[j];
      double sum = 0;
      for (int i = 0; i <= j; ++i) {
        sum += weightedJacobian[i][pj] * qy[i];
      }
      sum /= diag[pj];
      sum2 += sum * sum;
    }
    double gNorm = FastMath.sqrt(sum2);
    double paru = gNorm / delta;
    if (paru == 0) {
      paru = Precision.SAFE_MIN / FastMath.min(delta, 0.1);
    }

    // if the input par lies outside of the interval (parl,paru),
    // set par to the closer endpoint
    lmPar = FastMath.min(paru, FastMath.max(lmPar, parl));
    if (lmPar == 0) {
      lmPar = gNorm / dxNorm;
    }

    for (int countdown = 10; countdown >= 0; --countdown) {
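      // Safeguarded Newton search for lmPar, capped at 10 iterations as in MINPACK's lmpar.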

      // evaluate the function at the current value of lmPar
      if (lmPar == 0) {
        lmPar = FastMath.max(Precision.SAFE_MIN, 0.001 * paru);
      }
      double sPar = FastMath.sqrt(lmPar);
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] = sPar * diag[pj];
      }
      determineLMDirection(qy, work1, work2, internalData, solvedCols, work3, lmDir);

      dxNorm = 0;
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        double s = diag[pj] * lmDir[pj];
        work3[pj] = s;
        dxNorm += s * s;
      }
      dxNorm = FastMath.sqrt(dxNorm);
      double previousFP = fp;
      fp = dxNorm - delta;

      // if the function is small enough, accept the current value
      // of lmPar, also test for the exceptional cases where parl is zero
      if (FastMath.abs(fp) <= 0.1 * delta || (parl == 0 && fp <= previousFP && previousFP < 0)) {
        return lmPar;
      }

      // compute the Newton correction
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] = work3[pj] * diag[pj] / dxNorm;
      }
      for (int j = 0; j < solvedCols; ++j) {
        int pj = permutation[j];
        work1[pj] /= work2[j];
        double tmp = work1[pj];
        for (int i = j + 1; i < solvedCols; ++i) {
          work1[permutation[i]] -= weightedJacobian[i][pj] * tmp;
        }
      }
      sum2 = 0;
      for (int j = 0; j < solvedCols; ++j) {
        double s = work1[permutation[j]];
        sum2 += s * s;
      }
      double correction = fp / (delta * sum2);

      // depending on the sign of the function, update parl or paru.
      if (fp > 0) {
        parl = FastMath.max(parl, lmPar);
      } else if (fp < 0) {
        paru = FastMath.min(paru, lmPar);
      }

      // compute an improved estimate for lmPar
      lmPar = FastMath.max(parl, lmPar + correction);
    }

    return lmPar;
  }
Example 13
  /**
   * Returns the value of log B(p, q) for p, q > 0. Based on the <em>NSWC Library of
   * Mathematics Subroutines</em> implementation, {@code DBETLN}.
   *
   * @param p First argument.
   * @param q Second argument.
   * @return the value of {@code log(Beta(p, q))}, {@code NaN} if {@code p <= 0} or {@code q <= 0}.
   */
  public static double logBeta(final double p, final double q) {
    if (Double.isNaN(p) || Double.isNaN(q) || (p <= 0.0) || (q <= 0.0)) {
      return Double.NaN;
    }

    final double a = FastMath.min(p, q);
    final double b = FastMath.max(p, q);
    if (a >= 10.0) {
      final double w = sumDeltaMinusDeltaSum(a, b);
      final double h = a / b;
      final double c = h / (1.0 + h);
      final double u = -(a - 0.5) * FastMath.log(c);
      final double v = b * FastMath.log1p(h);
      if (u <= v) {
        return (((-0.5 * FastMath.log(b) + HALF_LOG_TWO_PI) + w) - u) - v;
      } else {
        return (((-0.5 * FastMath.log(b) + HALF_LOG_TWO_PI) + w) - v) - u;
      }
    } else if (a > 2.0) {
      if (b > 1000.0) {
        final int n = (int) FastMath.floor(a - 1.0);
        double prod = 1.0;
        double ared = a;
        for (int i = 0; i < n; i++) {
          ared -= 1.0;
          prod *= ared / (1.0 + ared / b);
        }
        return (FastMath.log(prod) - n * FastMath.log(b))
            + (Gamma.logGamma(ared) + logGammaMinusLogGammaSum(ared, b));
      } else {
        double prod1 = 1.0;
        double ared = a;
        while (ared > 2.0) {
          ared -= 1.0;
          final double h = ared / b;
          prod1 *= h / (1.0 + h);
        }
        if (b < 10.0) {
          double prod2 = 1.0;
          double bred = b;
          while (bred > 2.0) {
            bred -= 1.0;
            prod2 *= bred / (ared + bred);
          }
          return FastMath.log(prod1)
              + FastMath.log(prod2)
              + (Gamma.logGamma(ared) + (Gamma.logGamma(bred) - logGammaSum(ared, bred)));
        } else {
          return FastMath.log(prod1) + Gamma.logGamma(ared) + logGammaMinusLogGammaSum(ared, b);
        }
      }
    } else if (a >= 1.0) {
      if (b > 2.0) {
        if (b < 10.0) {
          double prod = 1.0;
          double bred = b;
          while (bred > 2.0) {
            bred -= 1.0;
            prod *= bred / (a + bred);
          }
          return FastMath.log(prod)
              + (Gamma.logGamma(a) + (Gamma.logGamma(bred) - logGammaSum(a, bred)));
        } else {
          return Gamma.logGamma(a) + logGammaMinusLogGammaSum(a, b);
        }
      } else {
        return Gamma.logGamma(a) + Gamma.logGamma(b) - logGammaSum(a, b);
      }
    } else {
      if (b >= 10.0) {
        return Gamma.logGamma(a) + logGammaMinusLogGammaSum(a, b);
      } else {
        // The following command is the original NSWC implementation.
        // return Gamma.logGamma(a) +
        // (Gamma.logGamma(b) - Gamma.logGamma(a + b));
        // The following command turns out to be more accurate.
        return FastMath.log(Gamma.gamma(a) * Gamma.gamma(b) / Gamma.gamma(a + b));
      }
    }
  }
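A quick sanity check against the defining identity B(p, q) = Γ(p)Γ(q)/Γ(p + q), using the closed forms B(1, 1) = 1 and B(0.5, 0.5) = π:

  double zero = Beta.logBeta(1.0, 1.0);   // 0.0
  double logPi = Beta.logBeta(0.5, 0.5);  // ~1.14473 = log(pi)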
Example 14
  /**
   * Compute the n-SUM for potential derivatives components.
   *
   * @param date current date
   * @param j resonant index <i>j</i>
   * @param m resonant order <i>m</i>
   * @param s d'Alembert characteristic <i>s</i>
   * @param maxN maximum possible value for <i>n</i> index
   * @param roaPow powers of R/a up to degree <i>n</i>
   * @param ghMSJ G<sup>j</sup><sub>m,s</sub> and H<sup>j</sup><sub>m,s</sub> polynomials
   * @param gammaMNS &Gamma;<sup>m</sup><sub>n,s</sub>(γ) function
   * @return Components of U<sub>n</sub> derivatives for fixed j, m, s
   * @throws OrekitException if some error occurred
   */
  private double[][] computeNSum(
      final AbsoluteDate date,
      final int j,
      final int m,
      final int s,
      final int maxN,
      final double[] roaPow,
      final GHmsjPolynomials ghMSJ,
      final GammaMnsFunction gammaMNS)
      throws OrekitException {

    // spherical harmonics
    final UnnormalizedSphericalHarmonics harmonics = provider.onDate(date);

    // Potential derivatives components
    double dUdaCos = 0.;
    double dUdaSin = 0.;
    double dUdhCos = 0.;
    double dUdhSin = 0.;
    double dUdkCos = 0.;
    double dUdkSin = 0.;
    double dUdlCos = 0.;
    double dUdlSin = 0.;
    double dUdAlCos = 0.;
    double dUdAlSin = 0.;
    double dUdBeCos = 0.;
    double dUdBeSin = 0.;
    double dUdGaCos = 0.;
    double dUdGaSin = 0.;

    // I^m
    final int Im = I > 0 ? 1 : (m % 2 == 0 ? 1 : -1);

    // Jacobi v, w indices from 2.7.1-(15)
    final int v = FastMath.abs(m - s);
    final int w = FastMath.abs(m + s);

    // Initialise the lower degree nmin = max(2, m, |s|) for the summation over n
    final int nmin = FastMath.max(FastMath.max(2, m), FastMath.abs(s));

    // Get the corresponding Hansen object
    final int sIndex = maxDegree + (j < 0 ? -s : s);
    final int jIndex = FastMath.abs(j);
    final HansenTesseralLinear hans = this.hansenObjects[sIndex][jIndex];

    // n-SUM from nmin to N
    for (int n = nmin; n <= maxN; n++) {
      // If (n - s) is odd, the contribution is zero because of Vmns
      if ((n - s) % 2 == 0) {

        // Vmns coefficient
        final double fns = fact[n + FastMath.abs(s)];
        final double vMNS = CoefficientsFactory.getVmns(m, n, s, fns, fact[n - m]);

        // Inclination function Gamma and derivative
        final double gaMNS = gammaMNS.getValue(m, n, s);
        final double dGaMNS = gammaMNS.getDerivative(m, n, s);

        // Hansen kernel value and derivative
        final double kJNS = hans.getValue(-n - 1, chi);
        final double dkJNS = hans.getDerivative(-n - 1, chi);

        // Gjms, Hjms polynomials and derivatives
        final double gMSJ = ghMSJ.getGmsj(m, s, j);
        final double hMSJ = ghMSJ.getHmsj(m, s, j);
        final double dGdh = ghMSJ.getdGmsdh(m, s, j);
        final double dGdk = ghMSJ.getdGmsdk(m, s, j);
        final double dGdA = ghMSJ.getdGmsdAlpha(m, s, j);
        final double dGdB = ghMSJ.getdGmsdBeta(m, s, j);
        final double dHdh = ghMSJ.getdHmsdh(m, s, j);
        final double dHdk = ghMSJ.getdHmsdk(m, s, j);
        final double dHdA = ghMSJ.getdHmsdAlpha(m, s, j);
        final double dHdB = ghMSJ.getdHmsdBeta(m, s, j);

        // Jacobi l-index from 2.7.1-(15)
        final int l = FastMath.min(n - m, n - FastMath.abs(s));
        // Jacobi polynomial and derivative
        final DerivativeStructure jacobi =
            JacobiPolynomials.getValue(l, v, w, new DerivativeStructure(1, 1, 0, gamma));

        // Geopotential coefficients
        final double cnm = harmonics.getUnnormalizedCnm(n, m);
        final double snm = harmonics.getUnnormalizedSnm(n, m);

        // Common factors from expansion of equations 3.3-4
        final double cf_0 = roaPow[n] * Im * vMNS;
        final double cf_1 = cf_0 * gaMNS * jacobi.getValue();
        final double cf_2 = cf_1 * kJNS;
        final double gcPhs = gMSJ * cnm + hMSJ * snm;
        final double gsMhc = gMSJ * snm - hMSJ * cnm;
        final double dKgcPhsx2 = 2. * dkJNS * gcPhs;
        final double dKgsMhcx2 = 2. * dkJNS * gsMhc;
        final double dUdaCoef = (n + 1) * cf_2;
        final double dUdlCoef = j * cf_2;
        final double dUdGaCoef =
            cf_0 * kJNS * (jacobi.getValue() * dGaMNS + gaMNS * jacobi.getPartialDerivative(1));

        // dU / da components
        dUdaCos += dUdaCoef * gcPhs;
        dUdaSin += dUdaCoef * gsMhc;

        // dU / dh components
        dUdhCos += cf_1 * (kJNS * (cnm * dGdh + snm * dHdh) + h * dKgcPhsx2);
        dUdhSin += cf_1 * (kJNS * (snm * dGdh - cnm * dHdh) + h * dKgsMhcx2);

        // dU / dk components
        dUdkCos += cf_1 * (kJNS * (cnm * dGdk + snm * dHdk) + k * dKgcPhsx2);
        dUdkSin += cf_1 * (kJNS * (snm * dGdk - cnm * dHdk) + k * dKgsMhcx2);

        // dU / dLambda components
        dUdlCos += dUdlCoef * gsMhc;
        dUdlSin += -dUdlCoef * gcPhs;

        // dU / dAlpha components
        dUdAlCos += cf_2 * (dGdA * cnm + dHdA * snm);
        dUdAlSin += cf_2 * (dGdA * snm - dHdA * cnm);

        // dU / dBeta components
        dUdBeCos += cf_2 * (dGdB * cnm + dHdB * snm);
        dUdBeSin += cf_2 * (dGdB * snm - dHdB * cnm);

        // dU / dGamma components
        dUdGaCos += dUdGaCoef * gcPhs;
        dUdGaSin += dUdGaCoef * gsMhc;
      }
    }

    return new double[][] {
      {dUdaCos, dUdaSin},
      {dUdhCos, dUdhSin},
      {dUdkCos, dUdkSin},
      {dUdlCos, dUdlSin},
      {dUdAlCos, dUdAlSin},
      {dUdBeCos, dUdBeSin},
      {dUdGaCos, dUdGaSin}
    };
  }
Example 15
  private void doParallelUpload(String bucket, String key, File file, ObjectMetadata metadata)
      throws IOException {

    long contentLength = file.length();
    long numParts = (contentLength + PARALLEL_UPLOAD_SIZE - 1) / PARALLEL_UPLOAD_SIZE;
    long partSize = (contentLength / numParts) + 1;
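    // e.g. (hypothetical numbers) contentLength = 100, PARALLEL_UPLOAD_SIZE = 40:
    // numParts = (100 + 40 - 1) / 40 = 3 and partSize = 100 / 3 + 1 = 34,
    // so the three parts cover 34 + 34 + 32 bytes of the file.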

    log.info("Uploading {} in {} parts with part size {}", file, numParts, partSize);

    // Part ETags are collected from multiple upload threads, so the list must be thread-safe.
    final List<PartETag> partETags = Collections.synchronizedList(Lists.<PartETag>newArrayList());
    InitiateMultipartUploadRequest initRequest =
        new InitiateMultipartUploadRequest(bucket, key, metadata);
    InitiateMultipartUploadResult initResponse = s3Client.initiateMultipartUpload(initRequest);
    String uploadID = initResponse.getUploadId();

    try {
      long filePosition = 0;
      int partNumber = 1;
      Collection<Future<?>> futures = Lists.newArrayList();
      while (filePosition < contentLength) {

        final UploadPartRequest uploadRequest =
            new UploadPartRequest()
                .withBucketName(bucket)
                .withKey(key)
                .withUploadId(uploadID)
                .withPartNumber(partNumber)
                .withFileOffset(filePosition)
                .withFile(file)
                .withPartSize(FastMath.min(partSize, contentLength - filePosition));

        futures.add(
            executorService.submit(
                new Callable<Object>() {
                  @Override
                  public Object call() {
                    String theKey = uploadRequest.getKey();
                    int thePartNumber = uploadRequest.getPartNumber();
                    log.info("Starting {} part {}", theKey, thePartNumber);
                    int failures = 0;
                    UploadPartResult uploadPartResult;
                    while (true) {
                      try {
                        uploadPartResult = s3Client.uploadPart(uploadRequest);
                        break;
                      } catch (AmazonClientException ace) {
                        if (++failures >= MAX_RETRIES) {
                          throw ace;
                        }
                        log.warn(
                            "Upload {} part {} failed ({}); retrying",
                            theKey,
                            thePartNumber,
                            ace.getMessage());
                      }
                    }
                    partETags.add(uploadPartResult.getPartETag());
                    log.info("Finished {} part {}", theKey, thePartNumber);
                    return null;
                  }
                }));

        filePosition += partSize;
        partNumber++;
      }

      for (Future<?> future : futures) {
        try {
          future.get();
        } catch (InterruptedException e) {
          throw new IOException(e);
        } catch (ExecutionException e) {
          throw new IOException(e.getCause());
        }
      }

      CompleteMultipartUploadRequest completeRequest =
          new CompleteMultipartUploadRequest(bucket, key, uploadID, partETags);
      s3Client.completeMultipartUpload(completeRequest);

    } catch (AmazonClientException ace) {
      AbortMultipartUploadRequest abortRequest =
          new AbortMultipartUploadRequest(bucket, key, uploadID);
      s3Client.abortMultipartUpload(abortRequest);
      throw ace;
    }
  }
Example 16
 /**
  * {@inheritDoc}
  *
  * <p>For number of successes {@code m} and sample size {@code n}, the upper bound of the support
  * is {@code min(m, n)}.
  *
  * @return upper bound of the support
  */
 public int getSupportUpperBound() {
   return FastMath.min(getNumberOfSuccesses(), getSampleSize());
 }
Example 17
 /**
  * Return the highest domain value for the given hypergeometric distribution parameters.
  *
  * @param m Number of successes in the population.
  * @param k Sample size.
  * @return the highest domain value of the hypergeometric distribution.
  */
 private int getUpperDomain(int m, int k) {
   return FastMath.min(k, m);
 }
Example 18
  /**
   * Computes the potential U derivatives.
   *
   * <p>The following elements are computed from expression 3.3 - (4).
   *
   * <pre>
   *  dU / da
   *  dU / dh
   *  dU / dk
   *  dU / dλ
   *  dU / dα
   *  dU / dβ
   *  dU / dγ
   *  </pre>
   *
   * @param date current date
   * @return potential derivatives
   * @throws OrekitException if an error occurs
   */
  private double[] computeUDerivatives(final AbsoluteDate date) throws OrekitException {

    // Potential derivatives
    double dUda = 0.;
    double dUdh = 0.;
    double dUdk = 0.;
    double dUdl = 0.;
    double dUdAl = 0.;
    double dUdBe = 0.;
    double dUdGa = 0.;

    // Compute only if there is at least one resonant tesseral
    if (!resOrders.isEmpty()) {
      // Gmsj and Hmsj polynomials
      final GHmsjPolynomials ghMSJ = new GHmsjPolynomials(k, h, alpha, beta, I);

      // GAMMAmns function
      final GammaMnsFunction gammaMNS = new GammaMnsFunction(fact, gamma, I);

      // Powers of R / a up to the maximum degree
      final double[] roaPow = new double[maxDegree + 1];
      roaPow[0] = 1.;
      for (int i = 1; i <= maxDegree; i++) {
        roaPow[i] = roa * roaPow[i - 1];
      }

      // SUM over resonant terms {j,m}
      for (int m : resOrders) {

        // Resonant index for the current resonant order
        final int j = FastMath.max(1, (int) FastMath.round(ratio * m));

        // Phase angle
        final double jlMmt = j * lm - m * theta;
        final double sinPhi = FastMath.sin(jlMmt);
        final double cosPhi = FastMath.cos(jlMmt);

        // Potential derivatives components for a given resonant pair {j,m}
        double dUdaCos = 0.;
        double dUdaSin = 0.;
        double dUdhCos = 0.;
        double dUdhSin = 0.;
        double dUdkCos = 0.;
        double dUdkSin = 0.;
        double dUdlCos = 0.;
        double dUdlSin = 0.;
        double dUdAlCos = 0.;
        double dUdAlSin = 0.;
        double dUdBeCos = 0.;
        double dUdBeSin = 0.;
        double dUdGaCos = 0.;
        double dUdGaSin = 0.;

        // s-SUM from -sMin to sMax
        final int sMin = FastMath.min(maxEccPow - j, maxDegree);
        final int sMax = FastMath.min(maxEccPow + j, maxDegree);
        for (int s = 0; s <= sMax; s++) {

          // Compute the initial values for Hansen coefficients using newComb operators
          this.hansenObjects[s + maxDegree][j].computeInitValues(e2, chi, chi2);

          // n-SUM for s positive
          final double[][] nSumSpos =
              computeNSum(date, j, m, s, maxDegree, roaPow, ghMSJ, gammaMNS);
          dUdaCos += nSumSpos[0][0];
          dUdaSin += nSumSpos[0][1];
          dUdhCos += nSumSpos[1][0];
          dUdhSin += nSumSpos[1][1];
          dUdkCos += nSumSpos[2][0];
          dUdkSin += nSumSpos[2][1];
          dUdlCos += nSumSpos[3][0];
          dUdlSin += nSumSpos[3][1];
          dUdAlCos += nSumSpos[4][0];
          dUdAlSin += nSumSpos[4][1];
          dUdBeCos += nSumSpos[5][0];
          dUdBeSin += nSumSpos[5][1];
          dUdGaCos += nSumSpos[6][0];
          dUdGaSin += nSumSpos[6][1];

          // n-SUM for s negative
          if (s > 0 && s <= sMin) {
            // Compute the initial values for Hansen coefficients using newComb operators
            this.hansenObjects[maxDegree - s][j].computeInitValues(e2, chi, chi2);

            final double[][] nSumSneg =
                computeNSum(date, j, m, -s, maxDegree, roaPow, ghMSJ, gammaMNS);
            dUdaCos += nSumSneg[0][0];
            dUdaSin += nSumSneg[0][1];
            dUdhCos += nSumSneg[1][0];
            dUdhSin += nSumSneg[1][1];
            dUdkCos += nSumSneg[2][0];
            dUdkSin += nSumSneg[2][1];
            dUdlCos += nSumSneg[3][0];
            dUdlSin += nSumSneg[3][1];
            dUdAlCos += nSumSneg[4][0];
            dUdAlSin += nSumSneg[4][1];
            dUdBeCos += nSumSneg[5][0];
            dUdBeSin += nSumSneg[5][1];
            dUdGaCos += nSumSneg[6][0];
            dUdGaSin += nSumSneg[6][1];
          }
        }

        // Assembly of potential derivative components
        dUda += cosPhi * dUdaCos + sinPhi * dUdaSin;
        dUdh += cosPhi * dUdhCos + sinPhi * dUdhSin;
        dUdk += cosPhi * dUdkCos + sinPhi * dUdkSin;
        dUdl += cosPhi * dUdlCos + sinPhi * dUdlSin;
        dUdAl += cosPhi * dUdAlCos + sinPhi * dUdAlSin;
        dUdBe += cosPhi * dUdBeCos + sinPhi * dUdBeSin;
        dUdGa += cosPhi * dUdGaCos + sinPhi * dUdGaSin;
      }

      dUda *= -moa / a;
      dUdh *= moa;
      dUdk *= moa;
      dUdl *= moa;
      dUdAl *= moa;
      dUdBe *= moa;
      dUdGa *= moa;
    }

    return new double[] {dUda, dUdh, dUdk, dUdl, dUdAl, dUdBe, dUdGa};
  }
Example 19
  /**
   * Verifies that nextPoisson(mean) generates an empirical distribution of values consistent with
   * PoissonDistributionImpl by generating 1000 values, computing a grouped frequency distribution
   * of the observed values and comparing this distribution to the corresponding expected
   * distribution computed using PoissonDistributionImpl. Uses ChiSquare test of goodness of fit to
   * evaluate the null hypothesis that the distributions are the same. If the null hypothesis can be
   * rejected with confidence 1 - alpha, the check fails.
   */
  public void checkNextPoissonConsistency(double mean) {
    // Generate sample values
    final int sampleSize = 1000; // Number of deviates to generate
    final int minExpectedCount = 7; // Minimum size of expected bin count
    long maxObservedValue = 0;
    final double alpha = 0.001; // Probability of false failure
    Frequency frequency = new Frequency();
    for (int i = 0; i < sampleSize; i++) {
      long value = randomData.nextPoisson(mean);
      if (value > maxObservedValue) {
        maxObservedValue = value;
      }
      frequency.addValue(value);
    }

    /*
     *  Set up bins for chi-square test.
     *  Ensure expected counts are all at least minExpectedCount.
     *  Start with upper and lower tail bins.
     *  Lower bin = [0, lower); Upper bin = [upper, +inf).
     */
    PoissonDistribution poissonDistribution = new PoissonDistribution(mean);
    int lower = 1;
    while (poissonDistribution.cumulativeProbability(lower - 1) * sampleSize < minExpectedCount) {
      lower++;
    }
    int upper = (int) (5 * mean); // Even for mean = 1, not much mass beyond 5
    while ((1 - poissonDistribution.cumulativeProbability(upper - 1)) * sampleSize
        < minExpectedCount) {
      upper--;
    }

    // Set bin width for interior bins. For a Poisson distribution, only the end bins need checking.
    int binWidth = 0;
    boolean widthSufficient = false;
    double lowerBinMass = 0;
    double upperBinMass = 0;
    while (!widthSufficient) {
      binWidth++;
      lowerBinMass = poissonDistribution.cumulativeProbability(lower - 1, lower + binWidth - 1);
      upperBinMass = poissonDistribution.cumulativeProbability(upper - binWidth - 1, upper - 1);
      widthSufficient = FastMath.min(lowerBinMass, upperBinMass) * sampleSize >= minExpectedCount;
    }

    /*
     *  Determine interior bin bounds.  Bins are
     *  [1, lower = binBounds[0]), [lower, binBounds[1]), [binBounds[1], binBounds[2]), ... ,
     *    [binBounds[binCount - 2], upper = binBounds[binCount - 1]), [upper, +inf)
     *
     */
    List<Integer> binBounds = new ArrayList<Integer>();
    binBounds.add(lower);
    int bound = lower + binWidth;
    while (bound < upper - binWidth) {
      binBounds.add(bound);
      bound += binWidth;
    }
    // The size of bin [binBounds[binCount - 2], upper) satisfies binWidth <= size < 2 * binWidth.
    binBounds.add(upper);

    // Compute observed and expected bin counts
    final int binCount = binBounds.size() + 1;
    long[] observed = new long[binCount];
    double[] expected = new double[binCount];

    // Bottom bin
    observed[0] = 0;
    for (int i = 0; i < lower; i++) {
      observed[0] += frequency.getCount(i);
    }
    expected[0] = poissonDistribution.cumulativeProbability(lower - 1) * sampleSize;

    // Top bin
    observed[binCount - 1] = 0;
    for (int i = upper; i <= maxObservedValue; i++) {
      observed[binCount - 1] += frequency.getCount(i);
    }
    expected[binCount - 1] =
        (1 - poissonDistribution.cumulativeProbability(upper - 1)) * sampleSize;

    // Interior bins
    for (int i = 1; i < binCount - 1; i++) {
      observed[i] = 0;
      for (int j = binBounds.get(i - 1); j < binBounds.get(i); j++) {
        observed[i] += frequency.getCount(j);
      }
      // Expected count is (mass in [binBounds[i-1], binBounds[i])) * sampleSize
      expected[i] =
          (poissonDistribution.cumulativeProbability(binBounds.get(i) - 1)
                  - poissonDistribution.cumulativeProbability(binBounds.get(i - 1) - 1))
              * sampleSize;
    }

    // Use a chi-square test to verify that the generated values are Poisson(mean)-distributed
    ChiSquareTest chiSquareTest = new ChiSquareTest();
    // Fail if we can reject null hypothesis that distributions are the same
    if (chiSquareTest.chiSquareTest(expected, observed, alpha)) {
      StringBuilder msgBuffer = new StringBuilder();
      DecimalFormat df = new DecimalFormat("#.##");
      msgBuffer.append("Chisquare test failed for mean = ");
      msgBuffer.append(mean);
      msgBuffer.append(" p-value = ");
      msgBuffer.append(chiSquareTest.chiSquareTest(expected, observed));
      msgBuffer.append(" chisquare statistic = ");
      msgBuffer.append(chiSquareTest.chiSquare(expected, observed));
      msgBuffer.append(". \n");
      msgBuffer.append("bin\t\texpected\tobserved\n");
      for (int i = 0; i < expected.length; i++) {
        msgBuffer.append("[");
        msgBuffer.append(i == 0 ? 1 : binBounds.get(i - 1));
        msgBuffer.append(",");
        msgBuffer.append(i == binBounds.size() ? "inf" : binBounds.get(i));
        msgBuffer.append(")");
        msgBuffer.append("\t\t");
        msgBuffer.append(df.format(expected[i]));
        msgBuffer.append("\t\t");
        msgBuffer.append(observed[i]);
        msgBuffer.append("\n");
      }
      msgBuffer.append("This test can fail randomly due to sampling error with probability ");
      msgBuffer.append(alpha);
      msgBuffer.append(".");
      Assert.fail(msgBuffer.toString());
    }
  }
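A minimal sketch of how this check might be driven from a JUnit test; the @Test harness and the particular means below are illustrative assumptions, not part of the snippet above:

  @Test
  public void testNextPoissonConsistency() {
    // Each call draws 1000 deviates and runs its own chi-square goodness-of-fit test.
    checkNextPoissonConsistency(4.0d); // small mean
    checkNextPoissonConsistency(100.0d); // large mean
  }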
Example n. 20
  /**
   * Computes Kendall's Tau rank correlation coefficient between the two arrays.
   *
   * @param xArray first data array
   * @param yArray second data array
   * @return Kendall's Tau rank correlation coefficient for the two arrays
   * @throws DimensionMismatchException if the array lengths do not match
   */
  public double correlation(final double[] xArray, final double[] yArray)
      throws DimensionMismatchException {

    if (xArray.length != yArray.length) {
      throw new DimensionMismatchException(xArray.length, yArray.length);
    }

    final int n = xArray.length;
    final long numPairs = n * (n - 1L) / 2L;

    @SuppressWarnings("unchecked")
    Pair<Double, Double>[] pairs = new Pair[n];
    for (int i = 0; i < n; i++) {
      pairs[i] = new Pair<Double, Double>(xArray[i], yArray[i]);
    }

    Arrays.sort(
        pairs,
        new Comparator<Pair<Double, Double>>() {
          public int compare(Pair<Double, Double> pair1, Pair<Double, Double> pair2) {
            int compareFirst = pair1.getFirst().compareTo(pair2.getFirst());
            return compareFirst != 0
                ? compareFirst
                : pair1.getSecond().compareTo(pair2.getSecond());
          }
        });

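    // Scan runs of equal x in the lexicographically sorted array: a run of t equal
    // values contributes t * (t - 1) / 2 pairs tied in x, and runs tied in both
    // coordinates are counted the same way for tiedXYPairs.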
    int tiedXPairs = 0;
    int tiedXYPairs = 0;
    int consecutiveXTies = 1;
    int consecutiveXYTies = 1;
    Pair<Double, Double> prev = pairs[0];
    for (int i = 1; i < n; i++) {
      final Pair<Double, Double> curr = pairs[i];
      if (curr.getFirst().equals(prev.getFirst())) {
        consecutiveXTies++;
        if (curr.getSecond().equals(prev.getSecond())) {
          consecutiveXYTies++;
        } else {
          tiedXYPairs += consecutiveXYTies * (consecutiveXYTies - 1) / 2;
          consecutiveXYTies = 1;
        }
      } else {
        tiedXPairs += consecutiveXTies * (consecutiveXTies - 1) / 2;
        consecutiveXTies = 1;
        tiedXYPairs += consecutiveXYTies * (consecutiveXYTies - 1) / 2;
        consecutiveXYTies = 1;
      }
      prev = curr;
    }
    tiedXPairs += consecutiveXTies * (consecutiveXTies - 1) / 2;
    tiedXYPairs += consecutiveXYTies * (consecutiveXYTies - 1) / 2;

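    // Count discordant pairs indirectly: stably merge-sort the pairs by their second
    // coordinate (bottom-up), counting each inversion as a "swap". Since the array is
    // already sorted by x, each swap corresponds to a discordant pair (tie corrections
    // are applied in the final formula below).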
    int swaps = 0;
    @SuppressWarnings("unchecked")
    Pair<Double, Double>[] pairsDestination = new Pair[n];
    for (int segmentSize = 1; segmentSize < n; segmentSize <<= 1) {
      for (int offset = 0; offset < n; offset += 2 * segmentSize) {
        int i = offset;
        final int iEnd = FastMath.min(i + segmentSize, n);
        int j = iEnd;
        final int jEnd = FastMath.min(j + segmentSize, n);

        int copyLocation = offset;
        while (i < iEnd || j < jEnd) {
          if (i < iEnd) {
            if (j < jEnd) {
              if (pairs[i].getSecond().compareTo(pairs[j].getSecond()) <= 0) {
                pairsDestination[copyLocation] = pairs[i];
                i++;
              } else {
                pairsDestination[copyLocation] = pairs[j];
                j++;
                swaps += iEnd - i;
              }
            } else {
              pairsDestination[copyLocation] = pairs[i];
              i++;
            }
          } else {
            pairsDestination[copyLocation] = pairs[j];
            j++;
          }
          copyLocation++;
        }
      }
      final Pair<Double, Double>[] pairsTemp = pairs;
      pairs = pairsDestination;
      pairsDestination = pairsTemp;
    }

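    // The merge sort left the pairs ordered by y; count pairs tied in y with the same
    // run-length scan used for x above.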
    int tiedYPairs = 0;
    int consecutiveYTies = 1;
    prev = pairs[0];
    for (int i = 1; i < n; i++) {
      final Pair<Double, Double> curr = pairs[i];
      if (curr.getSecond().equals(prev.getSecond())) {
        consecutiveYTies++;
      } else {
        tiedYPairs += consecutiveYTies * (consecutiveYTies - 1) / 2;
        consecutiveYTies = 1;
      }
      prev = curr;
    }
    tiedYPairs += consecutiveYTies * (consecutiveYTies - 1) / 2;

    final long concordantMinusDiscordant =
        numPairs - tiedXPairs - tiedYPairs + tiedXYPairs - 2 * swaps;
    return concordantMinusDiscordant
        / FastMath.sqrt((numPairs - tiedXPairs) * (numPairs - tiedYPairs));
  }
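The value returned above is the tie-corrected coefficient tau-b: tau_b = (C - D) / sqrt((n0 - n1) * (n0 - n2)), where n0 = n(n - 1)/2 is the total number of pairs, n1 and n2 count the pairs tied in x and in y, and C - D is recovered as n0 - n1 - n2 + (pairs tied in both) - 2 * swaps. A minimal usage sketch, assuming the method belongs to org.apache.commons.math3.stat.correlation.KendallsCorrelation as in Commons Math:

  double[] x = {1.0, 2.0, 3.0, 4.0};
  double[] y = {0.1, 0.3, 0.2, 0.4};
  // Exactly one of the six pairs (x = 2 vs. x = 3) is discordant, so
  // tau = (5 - 1) / 6 = 0.667 (no ties, so tau-b reduces to plain tau).
  double tau = new KendallsCorrelation().correlation(x, y);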
Example n. 21
  /** {@inheritDoc} */
  public Optimum optimize(final LeastSquaresProblem problem) {
    // Pull in relevant data from the problem as locals.
    final int nR = problem.getObservationSize(); // Number of observed data.
    final int nC = problem.getParameterSize(); // Number of parameters.
    // Counters.
    final Incrementor iterationCounter = problem.getIterationCounter();
    final Incrementor evaluationCounter = problem.getEvaluationCounter();
    // Convergence criterion.
    final ConvergenceChecker<Evaluation> checker = problem.getConvergenceChecker();

    // arrays shared with the other private methods
    final int solvedCols = FastMath.min(nR, nC);
    /* Parameters evolution direction associated with lmPar. */
    double[] lmDir = new double[nC];
    /* Levenberg-Marquardt parameter. */
    double lmPar = 0;

    // local point
    double delta = 0;
    double xNorm = 0;
    double[] diag = new double[nC];
    double[] oldX = new double[nC];
    double[] oldRes = new double[nR];
    double[] qtf = new double[nR];
    double[] work1 = new double[nC];
    double[] work2 = new double[nC];
    double[] work3 = new double[nC];

    // Evaluate the function at the starting point and calculate its norm.
    evaluationCounter.incrementCount();
    // value will be reassigned in the loop
    Evaluation current = problem.evaluate(problem.getStart());
    double[] currentResiduals = current.getResiduals().toArray();
    double currentCost = current.getCost();
    double[] currentPoint = current.getPoint().toArray();

    // Outer loop.
    boolean firstIteration = true;
    while (true) {
      iterationCounter.incrementCount();

      final Evaluation previous = current;

      // QR decomposition of the jacobian matrix
      final InternalData internalData = qrDecomposition(current.getJacobian(), solvedCols);
      final double[][] weightedJacobian = internalData.weightedJacobian;
      final int[] permutation = internalData.permutation;
      final double[] diagR = internalData.diagR;
      final double[] jacNorm = internalData.jacNorm;

      // residuals already have weights applied
      double[] weightedResidual = currentResiduals;
      for (int i = 0; i < nR; i++) {
        qtf[i] = weightedResidual[i];
      }

      // compute Qt.res
      qTy(qtf, internalData);

      // now we don't need Q anymore,
      // so let jacobian contain the R matrix with its diagonal elements
      for (int k = 0; k < solvedCols; ++k) {
        int pk = permutation[k];
        weightedJacobian[k][pk] = diagR[pk];
      }

      if (firstIteration) {
        // scale the point according to the norms of the columns
        // of the initial jacobian
        xNorm = 0;
        for (int k = 0; k < nC; ++k) {
          double dk = jacNorm[k];
          if (dk == 0) {
            dk = 1.0;
          }
          double xk = dk * currentPoint[k];
          xNorm += xk * xk;
          diag[k] = dk;
        }
        xNorm = FastMath.sqrt(xNorm);

        // initialize the step bound delta
        delta = (xNorm == 0) ? initialStepBoundFactor : (initialStepBoundFactor * xNorm);
      }

      // check orthogonality between function vector and jacobian columns
      double maxCosine = 0;
      if (currentCost != 0) {
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          double s = jacNorm[pj];
          if (s != 0) {
            double sum = 0;
            for (int i = 0; i <= j; ++i) {
              sum += weightedJacobian[i][pj] * qtf[i];
            }
            maxCosine = FastMath.max(maxCosine, FastMath.abs(sum) / (s * currentCost));
          }
        }
      }
      if (maxCosine <= orthoTolerance) {
        // Convergence has been reached.
        return new OptimumImpl(current, evaluationCounter.getCount(), iterationCounter.getCount());
      }

      // rescale if necessary
      for (int j = 0; j < nC; ++j) {
        diag[j] = FastMath.max(diag[j], jacNorm[j]);
      }

      // Inner loop.
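      // The loop repeats until a step achieves at least a 1.0e-4 ratio of actual to
      // predicted cost reduction, or a convergence/termination criterion returns.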
      for (double ratio = 0; ratio < 1.0e-4; ) {

        // save the state
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          oldX[pj] = currentPoint[pj];
        }
        final double previousCost = currentCost;
        double[] tmpVec = weightedResidual;
        weightedResidual = oldRes;
        oldRes = tmpVec;

        // determine the Levenberg-Marquardt parameter
        lmPar =
            determineLMParameter(
                qtf, delta, diag, internalData, solvedCols, work1, work2, work3, lmDir, lmPar);

        // compute the new point and the norm of the evolution direction
        double lmNorm = 0;
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          lmDir[pj] = -lmDir[pj];
          currentPoint[pj] = oldX[pj] + lmDir[pj];
          double s = diag[pj] * lmDir[pj];
          lmNorm += s * s;
        }
        lmNorm = FastMath.sqrt(lmNorm);
        // on the first iteration, adjust the initial step bound.
        if (firstIteration) {
          delta = FastMath.min(delta, lmNorm);
        }

        // Evaluate the function at x + p and calculate its norm.
        evaluationCounter.incrementCount();
        current = problem.evaluate(new ArrayRealVector(currentPoint));
        currentResiduals = current.getResiduals().toArray();
        currentCost = current.getCost();
        currentPoint = current.getPoint().toArray();

        // compute the scaled actual reduction
        double actRed = -1.0;
        if (0.1 * currentCost < previousCost) {
          double r = currentCost / previousCost;
          actRed = 1.0 - r * r;
        }

        // compute the scaled predicted reduction
        // and the scaled directional derivative
        for (int j = 0; j < solvedCols; ++j) {
          int pj = permutation[j];
          double dirJ = lmDir[pj];
          work1[j] = 0;
          for (int i = 0; i <= j; ++i) {
            work1[i] += weightedJacobian[i][pj] * dirJ;
          }
        }
        double coeff1 = 0;
        for (int j = 0; j < solvedCols; ++j) {
          coeff1 += work1[j] * work1[j];
        }
        double pc2 = previousCost * previousCost;
        coeff1 /= pc2;
        double coeff2 = lmPar * lmNorm * lmNorm / pc2;
        double preRed = coeff1 + 2 * coeff2;
        double dirDer = -(coeff1 + coeff2);

        // ratio of the actual to the predicted reduction
        ratio = (preRed == 0) ? 0 : (actRed / preRed);

        // update the step bound
        if (ratio <= 0.25) {
          double tmp = (actRed < 0) ? (0.5 * dirDer / (dirDer + 0.5 * actRed)) : 0.5;
          if ((0.1 * currentCost >= previousCost) || (tmp < 0.1)) {
            tmp = 0.1;
          }
          delta = tmp * FastMath.min(delta, 10.0 * lmNorm);
          lmPar /= tmp;
        } else if ((lmPar == 0) || (ratio >= 0.75)) {
          delta = 2 * lmNorm;
          lmPar *= 0.5;
        }

        // test for successful iteration.
        if (ratio >= 1.0e-4) {
          // successful iteration, update the norm
          firstIteration = false;
          xNorm = 0;
          for (int k = 0; k < nC; ++k) {
            double xK = diag[k] * currentPoint[k];
            xNorm += xK * xK;
          }
          xNorm = FastMath.sqrt(xNorm);

          // tests for convergence.
          if (checker != null
              && checker.converged(iterationCounter.getCount(), previous, current)) {
            return new OptimumImpl(
                current, evaluationCounter.getCount(), iterationCounter.getCount());
          }
        } else {
          // failed iteration, reset the previous values
          currentCost = previousCost;
          for (int j = 0; j < solvedCols; ++j) {
            int pj = permutation[j];
            currentPoint[pj] = oldX[pj];
          }
          tmpVec = weightedResidual;
          weightedResidual = oldRes;
          oldRes = tmpVec;
          // Reset "current" to previous values.
          current = previous;
        }

        // Default convergence criteria.
        if ((FastMath.abs(actRed) <= costRelativeTolerance
                && preRed <= costRelativeTolerance
                && ratio <= 2.0)
            || delta <= parRelativeTolerance * xNorm) {
          return new OptimumImpl(
              current, evaluationCounter.getCount(), iterationCounter.getCount());
        }

        // tests for termination and stringent tolerances
        if (FastMath.abs(actRed) <= TWO_EPS && preRed <= TWO_EPS && ratio <= 2.0) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_COST_RELATIVE_TOLERANCE, costRelativeTolerance);
        } else if (delta <= TWO_EPS * xNorm) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_PARAMETERS_RELATIVE_TOLERANCE, parRelativeTolerance);
        } else if (maxCosine <= TWO_EPS) {
          throw new ConvergenceException(
              LocalizedFormats.TOO_SMALL_ORTHOGONALITY_TOLERANCE, orthoTolerance);
        }
      }
    }
  }
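A minimal sketch of driving this optimizer through the commons-math3 least-squares API; the linear model, data, and builder settings below are illustrative assumptions, not taken from the snippet:

  // Fit y = a + b * x to three exact points; the optimizer should recover a = 1, b = 2.
  final double[] xs = {0.0, 1.0, 2.0};
  final double[] ys = {1.0, 3.0, 5.0};

  MultivariateJacobianFunction model = point -> {
    final double a = point.getEntry(0);
    final double b = point.getEntry(1);
    final RealVector value = new ArrayRealVector(xs.length);
    final RealMatrix jacobian = new Array2DRowRealMatrix(xs.length, 2);
    for (int i = 0; i < xs.length; i++) {
      value.setEntry(i, a + b * xs[i]);
      jacobian.setEntry(i, 0, 1.0); // d(a + b*x)/da
      jacobian.setEntry(i, 1, xs[i]); // d(a + b*x)/db
    }
    return new Pair<RealVector, RealMatrix>(value, jacobian);
  };

  LeastSquaresProblem problem =
      new LeastSquaresBuilder()
          .start(new double[] {0.0, 0.0})
          .model(model)
          .target(ys)
          .maxEvaluations(1000)
          .maxIterations(100)
          .build();

  Optimum optimum = new LevenbergMarquardtOptimizer().optimize(problem);
  double a = optimum.getPoint().getEntry(0); // ~1.0
  double b = optimum.getPoint().getEntry(1); // ~2.0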