Code Example #1
File: Kurtosis.java Project: OliverKramer02/Cy_3
 public double evaluate(double[] values, int begin, int length) {
   // Kurtosis is undefined for fewer than four values: leave NaN in that case.
   double kurt = Double.NaN;
   if ((test(values, begin, length)) && (length > 3)) {
     // Mean and standard deviation of the designated sub-array.
     Variance variance = new Variance();
     variance.incrementAll(values, begin, length);
     double mean = variance.moment.m1;
     double stdDev = FastMath.sqrt(variance.getResult());

     // Sum of the fourth powers of the deviations from the mean,
     // scaled by the fourth power of the standard deviation.
     double accum3 = 0.0D;
     for (int i = begin; i < begin + length; i++) {
       accum3 += FastMath.pow(values[i] - mean, 4.0D);
     }
     accum3 /= FastMath.pow(stdDev, 4.0D);

     // Bias-corrected kurtosis formula.
     double n0 = length;
     double coefficientOne =
         n0 * (n0 + 1.0D) / ((n0 - 1.0D) * (n0 - 2.0D) * (n0 - 3.0D));
     double termTwo =
         3.0D * FastMath.pow(n0 - 1.0D, 2.0D) / ((n0 - 2.0D) * (n0 - 3.0D));

     kurt = coefficientOne * accum3 - termTwo;
   }
   return kurt;
 }
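For context, a minimal usage sketch follows; it assumes the decompiled class above mirrors org.apache.commons.math3.stat.descriptive.moment.Kurtosis and only exercises the public evaluate(values, begin, length) entry point with illustrative data.

 import org.apache.commons.math3.stat.descriptive.moment.Kurtosis;

 public class KurtosisDemo {
   public static void main(String[] args) {
     // Illustrative data; at least 4 values are required, otherwise the result is NaN.
     double[] values = {1.0, 2.0, 2.5, 3.0, 10.0};
     double k = new Kurtosis().evaluate(values, 0, values.length);
     System.out.println("bias-corrected sample kurtosis = " + k);
   }
 }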
Code Example #2
  @Test
  public void testAdamUpdater() {
    INDArray m, v;
    double lr = 0.01;
    int iteration = 0;
    double beta1 = 0.8;
    double beta2 = 0.888;

    NeuralNetConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .learningRate(lr)
            .iterations(iteration)
            .adamMeanDecay(beta1)
            .adamVarDecay(beta2)
            .layer(
                new DenseLayer.Builder()
                    .nIn(nIn)
                    .nOut(nOut)
                    .updater(org.deeplearning4j.nn.conf.Updater.ADAM)
                    .build())
            .build();

    int numParams = LayerFactories.getFactory(conf).initializer().numParams(conf, true);
    INDArray params = Nd4j.create(1, numParams);
    Layer layer = LayerFactories.getFactory(conf).create(conf, null, 0, params, true);
    Updater updater = UpdaterCreator.getUpdater(layer);
    int updaterStateSize = updater.stateSizeForLayer(layer);
    INDArray updaterState = Nd4j.create(1, updaterStateSize);
    updater.setStateViewArray(layer, updaterState, true);

    updater.update(layer, gradient, iteration, 1);

    double beta1t = FastMath.pow(beta1, iteration);
    double beta2t = FastMath.pow(beta2, iteration);
    double alphat = lr * FastMath.sqrt(1 - beta2t) / (1 - beta1t);
    if (Double.isNaN(alphat) || alphat == 0.0) alphat = epsilon;

    Gradient gradientDup = new DefaultGradient();
    gradientDup.setGradientFor(DefaultParamInitializer.WEIGHT_KEY, weightGradient);
    gradientDup.setGradientFor(DefaultParamInitializer.BIAS_KEY, biasGradient);

    for (Map.Entry<String, INDArray> entry : gradientDup.gradientForVariable().entrySet()) {
      val = entry.getValue();
      m = Nd4j.zeros(val.shape());
      v = Nd4j.zeros(val.shape());

      m.muli(beta1).addi(val.mul(1.0 - beta1));
      v.muli(beta2).addi(val.mul(val).mul(1.0 - beta2));
      gradExpected = m.mul(alphat).divi(Transforms.sqrt(v).addi(epsilon));
      if (!gradExpected.equals(gradient.getGradientFor(entry.getKey()))) {
        System.out.println(Arrays.toString(gradExpected.dup().data().asFloat()));
        System.out.println(
            Arrays.toString(gradient.getGradientFor(entry.getKey()).dup().data().asFloat()));
      }
      assertEquals(gradExpected, gradient.getGradientFor(entry.getKey()));
    }

    assertEquals(beta1, layer.conf().getLayer().getAdamMeanDecay(), 1e-4);
    assertEquals(beta2, layer.conf().getLayer().getAdamVarDecay(), 1e-4);
  }
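The test reproduces the Adam update by hand before comparing it with the updater's output. As a reading aid, here is a standalone sketch of the same update rule in plain Java; lr, beta1 and beta2 mirror the test, while the parameters, gradient and epsilon are illustrative.

  // Sketch of the Adam rule the test checks:
  //   m <- beta1*m + (1 - beta1)*g
  //   v <- beta2*v + (1 - beta2)*g^2
  //   theta <- theta - alpha_t * m / (sqrt(v) + eps), alpha_t = lr*sqrt(1 - beta2^t)/(1 - beta1^t)
  public class AdamUpdateSketch {
    public static void main(String[] args) {
      double lr = 0.01, beta1 = 0.8, beta2 = 0.888, eps = 1e-8;
      double[] theta = {0.5, -0.3};   // illustrative parameters
      double[] g = {0.1, -0.2};       // illustrative gradient
      double[] m = new double[2], v = new double[2];
      int t = 1;                      // first update step
      double alphaT = lr * Math.sqrt(1 - Math.pow(beta2, t)) / (1 - Math.pow(beta1, t));
      for (int i = 0; i < theta.length; i++) {
        m[i] = beta1 * m[i] + (1 - beta1) * g[i];
        v[i] = beta2 * v[i] + (1 - beta2) * g[i] * g[i];
        theta[i] -= alphaT * m[i] / (Math.sqrt(v[i]) + eps);
      }
      System.out.println(java.util.Arrays.toString(theta));
    }
  }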
Code Example #3
File: BOBYQAOptimizerTest.java Project: mfwzan/astor
 public double value(double[] x) {
   double f = 0;
   x = B.Rotate(x);
   for (int i = 0; i < x.length; ++i)
     f += FastMath.pow(factor, i / (x.length - 1.)) * x[i] * x[i];
   return f;
 }
Code Example #4
File: Complex.java Project: mfwzan/astor
  /**
   * Computes the n-th roots of this complex number. The nth roots are defined by the formula:
   *
   * <pre>
   *  <code>
   *   z<sub>k</sub> = abs<sup>1/n</sup> (cos((phi + 2&pi;k)/n) + i sin((phi + 2&pi;k)/n))
   *  </code>
   * </pre>
   *
   * for <i>{@code k=0, 1, ..., n-1}</i>, where {@code abs} and {@code phi} are respectively the
   * {@link #abs() modulus} and {@link #getArgument() argument} of this complex number. <br>
   * If either part of this complex number is NaN, a list with just one element, {@link #NaN}, is
   * returned. If neither part is NaN, but at least one part is infinite, the result is a
   * one-element list containing {@link #INF}.
   *
   * @param n Degree of root.
   * @return a list of all {@code n}-th roots of {@code this}.
   * @throws NotPositiveException if {@code n <= 0}.
   * @since 2.0
   */
  public List<Complex> nthRoot(int n) throws NotPositiveException {

    if (n <= 0) {
      throw new NotPositiveException(LocalizedFormats.CANNOT_COMPUTE_NTH_ROOT_FOR_NEGATIVE_N, n);
    }

    final List<Complex> result = new ArrayList<Complex>();

    if (isNaN) {
      result.add(NaN);
      return result;
    }
    if (isInfinite()) {
      result.add(INF);
      return result;
    }

    // nth root of abs -- faster / more accurate to use a solver here?
    final double nthRootOfAbs = FastMath.pow(abs(), 1.0 / n);

    // Compute nth roots of complex number with k = 0, 1, ... n-1
    final double nthPhi = getArgument() / n;
    final double slice = 2 * FastMath.PI / n;
    double innerPart = nthPhi;
    for (int k = 0; k < n; k++) {
      // inner part
      final double realPart = nthRootOfAbs * FastMath.cos(innerPart);
      final double imaginaryPart = nthRootOfAbs * FastMath.sin(innerPart);
      result.add(createComplex(realPart, imaginaryPart));
      innerPart += slice;
    }

    return result;
  }
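A short usage sketch of the method above, assuming the enclosing class is Commons Math's org.apache.commons.math3.complex.Complex; the three cube roots of 8 are 2 and -1 ± i*sqrt(3), up to rounding.

  import java.util.List;
  import org.apache.commons.math3.complex.Complex;

  public class NthRootDemo {
    public static void main(String[] args) {
      Complex z = new Complex(8.0, 0.0);
      // Expected (up to rounding): 2, -1 + 1.732i, -1 - 1.732i
      List<Complex> roots = z.nthRoot(3);
      for (Complex root : roots) {
        System.out.println(root);
      }
    }
  }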
Code Example #5
  /**
   * Calculates {@code P(D_n < d)} using method described in [1] with quick decisions for extreme
   * values given in [2] (see above).
   *
   * @param d statistic
   * @param exact whether the probability should be calculated exactly using {@link
   *     org.apache.commons.math3.fraction.BigFraction} everywhere at the expense of very slow
   *     execution time, or if {@code double} should be used in convenient places to gain speed. Almost
   *     never choose {@code true} in real applications unless you are very sure; {@code true} is
   *     almost solely for verification purposes.
   * @return the two-sided probability of {@code P(D_n < d)}
   * @throws MathArithmeticException if algorithm fails to convert {@code h} to a {@link
   *     org.apache.commons.math3.fraction.BigFraction} in expressing {@code d} as {@code (k - h) /
   *     m} for integer {@code k, m} and {@code 0 <= h < 1}.
   */
  public double cdf(double d, boolean exact) throws MathArithmeticException {

    final double ninv = 1 / ((double) n);
    final double ninvhalf = 0.5 * ninv;

    if (d <= ninvhalf) {

      return 0;

    } else if (ninvhalf < d && d <= ninv) {

      double res = 1;
      double f = 2 * d - ninv;

      // n! f^n = n*f * (n-1)*f * ... * 1*f
      for (int i = 1; i <= n; ++i) {
        res *= i * f;
      }

      return res;

    } else if (1 - ninv <= d && d < 1) {

      return 1 - 2 * FastMath.pow(1 - d, n);

    } else if (1 <= d) {

      return 1;
    }

    return exact ? exactK(d) : roundedK(d);
  }
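In the branch 1/(2n) < d <= 1/n, the loop evaluates n! * (2d - 1/n)^n one factor at a time. A small self-contained sketch of that identity, with n and d chosen arbitrarily inside the branch:

  // Sketch: for 1/(2n) < d <= 1/n, P(D_n < d) = n! * (2d - 1/n)^n.
  public class KsMiddleBranchSketch {
    public static void main(String[] args) {
      int n = 5;
      double d = 0.15;               // lies in (1/(2n), 1/n] = (0.1, 0.2]
      double f = 2 * d - 1.0 / n;
      double res = 1;
      for (int i = 1; i <= n; ++i) {
        res *= i * f;                // accumulates n! * f^n, here 120 * 0.1^5 = 0.0012
      }
      System.out.println("P(D_n < d) = " + res);
    }
  }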
Code Example #6
  public double applyCalibration(
      final double v,
      final double rangeIndex,
      final double azimuthIndex,
      final double slantRange,
      final double satelliteHeight,
      final double sceneToEarthCentre,
      final double localIncidenceAngle,
      final String bandPolar,
      final Unit.UnitType bandUnit,
      int[] subSwathIndex) {

    double sigma = 0.0;
    if (bandUnit == Unit.UnitType.AMPLITUDE) {
      sigma = v * v;
    } else if (bandUnit == Unit.UnitType.INTENSITY
        || bandUnit == Unit.UnitType.REAL
        || bandUnit == Unit.UnitType.IMAGINARY) {
      sigma = v;
    } else if (bandUnit == Unit.UnitType.INTENSITY_DB) {
      sigma = FastMath.pow(10, v / 10.0); // convert dB to linear scale
    } else {
      throw new OperatorException("Unknown band unit");
    }

    if (incidenceAngleSelection.contains(USE_INCIDENCE_ANGLE_FROM_DEM)) {
      return sigma * calibrationFactor * FastMath.sin(localIncidenceAngle * Constants.DTOR);
    } else { // USE_INCIDENCE_ANGLE_FROM_ELLIPSOID
      return sigma * calibrationFactor;
    }
  }
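The INTENSITY_DB branch is the standard decibel-to-linear conversion, sigma = 10^(v/10); a small sketch with an illustrative value:

  // Hypothetical sketch of the dB branch above: decibels to linear scale.
  public class DbToLinearSketch {
    public static void main(String[] args) {
      double vDb = -7.5;                                // illustrative backscatter value in dB
      double sigmaLinear = Math.pow(10.0, vDb / 10.0);  // 10^(-0.75) is roughly 0.178
      System.out.println(sigmaLinear);
    }
  }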
Code Example #7
File: BOBYQAOptimizerTest.java Project: mfwzan/astor
 //        private int fcount = 0;
 public double value(double[] x) {
   double f = 0;
   for (int i = 0; i < x.length; ++i)
     f += FastMath.pow(FastMath.abs(x[i]), 2. + 10 * (double) i / (x.length - 1.));
   //            System.out.print("" + (fcount++) + ") ");
   //            for (int i = 0; i < x.length; i++)
   //                System.out.print(x[i] +  " ");
   //            System.out.println(" = " + f);
   return f;
 }
Code Example #8
  /** Get calibration factor. */
  private void getCalibrationFactor() {

    calibrationFactor = absRoot.getAttributeDouble(AbstractMetadata.calibration_factor);

    if (isComplex) {
      calibrationFactor -= 32.0; // calibration factor offset is 32 dB
    }

    calibrationFactor = FastMath.pow(10.0, calibrationFactor / 10.0); // dB to linear scale
    // System.out.println("Calibration factor is " + calibrationFactor);
  }
Code Example #9
  /** {@inheritDoc} */
  public ConfidenceInterval createInterval(
      int numberOfTrials, int numberOfSuccesses, double confidenceLevel) {
    IntervalUtils.checkParameters(numberOfTrials, numberOfSuccesses, confidenceLevel);
    final double alpha = (1.0 - confidenceLevel) / 2;
    final NormalDistribution normalDistribution = new NormalDistribution();
    final double z = normalDistribution.inverseCumulativeProbability(1 - alpha);
    final double zSquared = FastMath.pow(z, 2);
    final double mean = (double) numberOfSuccesses / (double) numberOfTrials;

    final double factor = 1.0 / (1 + (1.0 / numberOfTrials) * zSquared);
    final double modifiedSuccessRatio = mean + (1.0 / (2 * numberOfTrials)) * zSquared;
    final double difference =
        z
            * FastMath.sqrt(
                1.0 / numberOfTrials * mean * (1 - mean)
                    + (1.0 / (4 * FastMath.pow(numberOfTrials, 2)) * zSquared));

    final double lowerBound = factor * (modifiedSuccessRatio - difference);
    final double upperBound = factor * (modifiedSuccessRatio + difference);
    return new ConfidenceInterval(lowerBound, upperBound, confidenceLevel);
  }
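A minimal usage sketch, assuming the method above is the createInterval implementation of Commons Math's org.apache.commons.math3.stat.interval.WilsonScoreInterval; the counts and confidence level are illustrative.

  import org.apache.commons.math3.stat.interval.ConfidenceInterval;
  import org.apache.commons.math3.stat.interval.WilsonScoreInterval;

  public class WilsonScoreDemo {
    public static void main(String[] args) {
      // 42 successes out of 100 trials, 95% confidence level
      ConfidenceInterval ci = new WilsonScoreInterval().createInterval(100, 42, 0.95);
      System.out.println(ci.getLowerBound() + " .. " + ci.getUpperBound());
    }
  }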
Code Example #10
 public static ItemType chooseOldItemType(double logParam, List<ItemType> itemTypes, double rand) {
   // sum of log values for each category in itemTypesUsed
   // log( (x + 1) / x), log-base to be given
   double totalWeight = FastMath.log(logParam, itemTypes.size() + 1);
   // index = k^y - 1, where k is the log base and y = rand * totalWeight (low indices are most likely)
   // basically given a random number, decides which category to reuse.
   double scaledRand = rand * totalWeight;
   int index =
       (int) (FastMath.pow(logParam, scaledRand))
           - 1; // gives a value between 0 and itemTypesUsed.size() exclusive
   return itemTypes.get(index);
 }
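The selection above maps a uniform random number through a logarithm and a power so that low indices are reused far more often than high ones. A small sketch of just the index arithmetic (no ItemType needed; the log base and category count are illustrative):

  // Sketch: with log base k and N categories, totalWeight = log_k(N + 1) and
  // index = floor(k^(rand * totalWeight)) - 1, which always lands in [0, N).
  public class LogWeightedIndexSketch {
    public static void main(String[] args) {
      double logParam = 2.0;   // log base k (illustrative)
      int numTypes = 8;        // N categories (illustrative)
      double totalWeight = Math.log(numTypes + 1) / Math.log(logParam);
      for (double rand = 0.05; rand < 1.0; rand += 0.3) {
        int index = (int) Math.pow(logParam, rand * totalWeight) - 1;
        System.out.println("rand=" + rand + " -> index=" + index);
      }
    }
  }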
Code Example #11
File: Product.java Project: vikasdhar/symja
 /**
  * Returns the weighted product of the entries in the specified portion of the input array, or
  * <code>Double.NaN</code> if the designated subarray is empty.
  *
  * <p>Throws <code>MathIllegalArgumentException</code> if any of the following are true:
  *
  * <ul>
  *   <li>the values array is null
  *   <li>the weights array is null
  *   <li>the weights array does not have the same length as the values array
  *   <li>the weights array contains one or more infinite values
  *   <li>the weights array contains one or more NaN values
  *   <li>the weights array contains negative values
  *   <li>the start and length arguments do not determine a valid array
  * </ul>
  *
  * <p>Uses the formula,
  *
  * <pre>
  *    weighted product = &prod;values[i]<sup>weights[i]</sup>
  * </pre>
  *
  * that is, the weights are applied as exponents when computing the weighted product.
  *
  * @param values the input array
  * @param weights the weights array
  * @param begin index of the first array element to include
  * @param length the number of elements to include
  * @return the product of the values or 1 if length = 0
  * @throws MathIllegalArgumentException if the parameters are not valid
  * @since 2.1
  */
 public double evaluate(
     final double[] values, final double[] weights, final int begin, final int length)
     throws MathIllegalArgumentException {
   double product = Double.NaN;
   if (test(values, weights, begin, length, true)) {
     product = 1.0;
     for (int i = begin; i < begin + length; i++) {
       product *= FastMath.pow(values[i], weights[i]);
     }
   }
   return product;
 }
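A minimal usage sketch, assuming this is Commons Math's org.apache.commons.math3.stat.descriptive.summary.Product; the values and weights are illustrative.

 import org.apache.commons.math3.stat.descriptive.summary.Product;

 public class WeightedProductDemo {
   public static void main(String[] args) {
     double[] values = {2.0, 3.0, 4.0};
     double[] weights = {1.0, 2.0, 0.5};
     // weighted product = 2^1 * 3^2 * 4^0.5 = 36
     double p = new Product().evaluate(values, weights, 0, values.length);
     System.out.println(p);
   }
 }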
Code Example #12
File: BOBYQAOptimizerTest.java Project: mfwzan/astor
 public double value(double[] x) {
   double f = 0;
   double fac;
   for (int i = 0; i < x.length; ++i) {
     fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
     if (i == 0 && x[i] < 0) fac *= 1.;
     f +=
         fac * fac * x[i] * x[i]
             + amplitude * (1. - FastMath.cos(2. * FastMath.PI * fac * x[i]));
   }
   return f;
 }
Code Example #13
File: BOBYQAOptimizerTest.java Project: mfwzan/astor
 public double value(double[] x) {
   double f = 0;
   double res2 = 0;
   double fac = 0;
   for (int i = 0; i < x.length; ++i) {
     fac = FastMath.pow(axisratio, (i - 1.) / (x.length - 1.));
     f += fac * fac * x[i] * x[i];
     res2 += FastMath.cos(2. * FastMath.PI * fac * x[i]);
   }
   f =
       (20.
           - 20. * FastMath.exp(-0.2 * FastMath.sqrt(f / x.length))
           + FastMath.exp(1.)
           - FastMath.exp(res2 / x.length));
   return f;
 }
Code Example #14
File: IntervalsSetTest.java Project: PAMSE/APGBench
  @Test
  public void testInfinite() {
    IntervalsSet set = new IntervalsSet(9.0, Double.POSITIVE_INFINITY);
    Assert.assertEquals(Region.Location.BOUNDARY, set.checkPoint(new Vector1D(9.0)));
    Assert.assertEquals(Region.Location.OUTSIDE, set.checkPoint(new Vector1D(8.4)));
    for (double e = 1.0; e <= 6.0; e += 1.0) {
      Assert.assertEquals(
          Region.Location.INSIDE, set.checkPoint(new Vector1D(FastMath.pow(10.0, e))));
    }
    Assert.assertTrue(Double.isInfinite(set.getSize()));
    Assert.assertEquals(9.0, set.getInf(), 1.0e-10);
    Assert.assertTrue(Double.isInfinite(set.getSup()));

    set = (IntervalsSet) new RegionFactory<Euclidean1D>().getComplement(set);
    Assert.assertEquals(9.0, set.getSup(), 1.0e-10);
    Assert.assertTrue(Double.isInfinite(set.getInf()));
  }
Code Example #15
  @Override
  public double getPartialSimilarity(double[] a, double[] b) {
    final double lp = toHilbertPSpace(a, b);

    // Per corner case condition
    if (lp >= getSigma()) return 0.0;

    final double twoOverPi = (2d / FastMath.PI);
    final double lpOverSig = lp / getSigma();

    /* Front segment */
    final double front = twoOverPi * FastMath.acos(-lpOverSig);

    /* Back segment */
    final double first = twoOverPi * lpOverSig;
    final double second = FastMath.sqrt(1.0 - FastMath.pow(lpOverSig, 2));
    final double back = first * second;
    final double answer = front - back;

    return Double.isNaN(answer) ? Double.NEGATIVE_INFINITY : answer;
  }
Code Example #16
  @Test
  public void testIncreasingTolerance()
      throws DimensionMismatchException, NumberIsTooSmallException, MaxCountExceededException,
          NoBracketingException {

    int previousCalls = Integer.MAX_VALUE;
    AdaptiveStepsizeIntegrator integ =
        new DormandPrince853Integrator(0, Double.POSITIVE_INFINITY, Double.NaN, Double.NaN);
    for (int i = -12; i < -2; ++i) {
      TestProblem1 pb = new TestProblem1();
      double minStep = 0;
      double maxStep = pb.getFinalTime() - pb.getInitialTime();
      double scalAbsoluteTolerance = FastMath.pow(10.0, i);
      double scalRelativeTolerance = 0.01 * scalAbsoluteTolerance;
      integ.setStepSizeControl(minStep, maxStep, scalAbsoluteTolerance, scalRelativeTolerance);

      TestProblemHandler handler = new TestProblemHandler(pb, integ);
      integ.addStepHandler(handler);
      integ.integrate(
          pb,
          pb.getInitialTime(),
          pb.getInitialState(),
          pb.getFinalTime(),
          new double[pb.getDimension()]);

      // the 1.3 factor is only valid for this test
      // and has been obtained from trial and error
      // there is no general relation between local and global errors
      Assert.assertTrue(handler.getMaximalValueError() < (1.3 * scalAbsoluteTolerance));
      Assert.assertEquals(0, handler.getMaximalTimeError(), 1.0e-12);

      int calls = pb.getCalls();
      Assert.assertEquals(integ.getEvaluations(), calls);
      Assert.assertTrue(calls <= previousCalls);
      previousCalls = calls;
    }
  }
Code Example #17
File: Ellipsoid.java Project: fpl/s1tbx
 private static double computeEllipsoidNormal(final double phi) {
   return a / Math.sqrt(1.0 - e2 * FastMath.pow(FastMath.sin(phi), 2));
 }
Code Example #18
File: BOBYQAOptimizerTest.java Project: mfwzan/astor
 public double value(double[] x) {
   double f = FastMath.pow(new DiffPow().value(x), 0.25);
   return f;
 }
Code Example #19
File: Ellipsoid.java Project: fpl/s1tbx
 private double computeCurvatureRadiusInMeridianPlane(final double phi) {
   // exponent is 3/2; written as 1.5 because "3 / 2" in Java is integer division (== 1)
   return a * (1 - e2) / FastMath.pow((1 - e2 * FastMath.pow(FastMath.sin(phi), 2)), 1.5);
 }
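The meridian radius of curvature is M(phi) = a*(1 - e2) / (1 - e2*sin^2(phi))^(3/2), which is why the exponent must be 1.5 rather than the integer division 3 / 2. A standalone sketch with the published WGS84 constants, as a rough sanity check:

 // Sketch: meridian radius of curvature with WGS84 constants (values below are the standard ones).
 public class MeridianRadiusSketch {
   public static void main(String[] args) {
     double a = 6378137.0;            // WGS84 semi-major axis [m]
     double e2 = 6.69437999014e-3;    // WGS84 first eccentricity squared
     double phi = Math.toRadians(45.0);
     double m = a * (1 - e2) / Math.pow(1 - e2 * Math.pow(Math.sin(phi), 2), 1.5);
     System.out.println("M(45 deg) = " + m + " m");   // roughly 6.37e6 m
   }
 }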
Code Example #20
File: CPM.java Project: fpl/s1tbx
  private void estimateCPM() {

    logger.info("Start EJML Estimation");

    numIterations = 0;
    boolean estimationDone = false;

    DenseMatrix64F eL_hat = null;
    DenseMatrix64F eP_hat = null;
    DenseMatrix64F rhsL = null;
    DenseMatrix64F rhsP = null;

    // normalize master coordinates for stability -- only master!
    TDoubleArrayList yMasterNorm = new TDoubleArrayList();
    TDoubleArrayList xMasterNorm = new TDoubleArrayList();
    for (int i = 0; i < yMaster.size(); i++) {
      yMasterNorm.add(PolyUtils.normalize2(yMaster.getQuick(i), normWin.linelo, normWin.linehi));
      xMasterNorm.add(PolyUtils.normalize2(xMaster.getQuick(i), normWin.pixlo, normWin.pixhi));
    }

    // helper variables
    int winL;
    int winP;
    int maxWSum_idx = 0;

    while (!estimationDone) {

      String codeBlockMessage = "LS ESTIMATION PROCEDURE";
      StopWatch stopWatch = new StopWatch();
      StopWatch clock = new StopWatch();
      clock.start();
      stopWatch.setTag(codeBlockMessage);
      stopWatch.start();

      logger.info("Start iteration: {}" + numIterations);

      /** Remove identified outlier from previous estimation */
      if (numIterations != 0) {
        logger.info(
            "Removing observation " + index.getQuick(maxWSum_idx)
                + ", idxList " + maxWSum_idx
                + ", from observation vector.");
        index.removeAt(maxWSum_idx);
        yMasterNorm.removeAt(maxWSum_idx);
        xMasterNorm.removeAt(maxWSum_idx);
        yOffset.removeAt(maxWSum_idx);
        xOffset.removeAt(maxWSum_idx);

        // only for outlier removal
        yMaster.removeAt(maxWSum_idx);
        xMaster.removeAt(maxWSum_idx);
        ySlave.removeAt(maxWSum_idx);
        xSlave.removeAt(maxWSum_idx);
        coherence.removeAt(maxWSum_idx);

        // also take care of slave pins
        slaveGCPList.remove(maxWSum_idx);

        //                if (demRefinement) {
        //                    ySlaveGeometry.removeAt(maxWSum_idx);
        //                    xSlaveGeometry.removeAt(maxWSum_idx);
        //                }

      }

      /** Check redundancy */
      numObservations = index.size(); // Number of points > threshold
      if (numObservations < numUnknowns) {
        logger.severe(
            "coregpm: Number of windows > threshold is smaller than parameters solved for.");
        throw new ArithmeticException(
            "coregpm: Number of windows > threshold is smaller than parameters solved for.");
      }

      // work with normalized values
      DenseMatrix64F A =
          new DenseMatrix64F(
              SystemOfEquations.constructDesignMatrix_loop(
                  yMasterNorm.toArray(), xMasterNorm.toArray(), cpmDegree));

      logger.info("TIME FOR SETUP of SYSTEM : {}" + stopWatch.lap("setup"));

      RowD1Matrix64F Qy_1; // vector
      double meanValue;
      switch (cpmWeight) {
        case "linear":
          logger.info("Using sqrt(coherence) as weights");
          Qy_1 = DenseMatrix64F.wrap(numObservations, 1, coherence.toArray());
          // Normalize weights to avoid influence on estimated var.factor
          logger.info("Normalizing covariance matrix for LS estimation");
          meanValue = CommonOps.elementSum(Qy_1) / numObservations;
          CommonOps.divide(meanValue, Qy_1); // normalize vector
          break;
        case "quadratic":
          logger.info("Using coherence as weights.");
          Qy_1 = DenseMatrix64F.wrap(numObservations, 1, coherence.toArray());
          CommonOps.elementMult(Qy_1, Qy_1);
          // Normalize weights to avoid influence on estimated var.factor
          meanValue = CommonOps.elementSum(Qy_1) / numObservations;
          logger.info("Normalizing covariance matrix for LS estimation.");
          CommonOps.divide(meanValue, Qy_1); // normalize vector
          break;
        case "bamler":
          // TODO: see Bamler papers IGARSS 2000 and 2004
          logger.warning("Bamler weighting method NOT IMPLEMENTED, falling back to None.");
          Qy_1 = onesEJML(numObservations);
          break;
        case "none":
          logger.info("No weighting.");
          Qy_1 = onesEJML(numObservations);
          break;
        default:
          Qy_1 = onesEJML(numObservations);
          break;
      }

      logger.info("TIME FOR SETUP of VC diag matrix: {}" + stopWatch.lap("diag VC matrix"));

      /** tempMatrix_1 matrices */
      final DenseMatrix64F yL_matrix = DenseMatrix64F.wrap(numObservations, 1, yOffset.toArray());
      final DenseMatrix64F yP_matrix = DenseMatrix64F.wrap(numObservations, 1, xOffset.toArray());
      logger.info("TIME FOR SETUP of TEMP MATRICES: {}" + stopWatch.lap("Temp matrices"));

      /** normal matrix */
      final DenseMatrix64F N =
          new DenseMatrix64F(numUnknowns, numUnknowns); // = A_transpose.mmul(Qy_1_diag.mmul(A));

      /*
                  // fork/join parallel implementation
                  RowD1Matrix64F result = A.copy();
                  DiagXMat dd = new DiagXMat(Qy_1, A, 0, A.numRows, result);
                  ForkJoinPool pool = new ForkJoinPool();
                  pool.invoke(dd);
                  CommonOps.multAddTransA(A, dd.result, N);
      */

      CommonOps.multAddTransA(A, diagxmat(Qy_1, A), N);
      DenseMatrix64F Qx_hat = N.copy();

      logger.info("TIME FOR SETUP of NORMAL MATRIX: {}" + stopWatch.lap("Normal matrix"));

      /** right hand sides */
      // azimuth
      rhsL = new DenseMatrix64F(numUnknowns, 1); // A_transpose.mmul(Qy_1_diag.mmul(yL_matrix));
      CommonOps.multAddTransA(1d, A, diagxmat(Qy_1, yL_matrix), rhsL);
      // range
      rhsP = new DenseMatrix64F(numUnknowns, 1); // A_transpose.mmul(Qy_1_diag.mmul(yP_matrix));
      CommonOps.multAddTransA(1d, A, diagxmat(Qy_1, yP_matrix), rhsP);
      logger.info("TIME FOR SETUP of RightHand Side: {}" + stopWatch.lap("Right-hand-side"));

      LinearSolver<DenseMatrix64F> solver = LinearSolverFactory.leastSquares(100, 100);
      /** compute solution */
      if (!solver.setA(Qx_hat)) {
        throw new IllegalArgumentException("Singular Matrix");
      }
      solver.solve(rhsL, rhsL);
      solver.solve(rhsP, rhsP);
      logger.info("TIME FOR SOLVING of System: {}" + stopWatch.lap("Solving System"));

      /** inverting of Qx_hat for stability check */
      solver.invert(Qx_hat);

      logger.info("TIME FOR INVERSION OF N: {}" + stopWatch.lap("Inversion of N"));

      /** test inversion and check stability: max(abs([N*inv(N) - E)) ?= 0 */
      DenseMatrix64F tempMatrix_1 = new DenseMatrix64F(N.numRows, N.numCols);
      CommonOps.mult(N, Qx_hat, tempMatrix_1);
      CommonOps.subEquals(
          tempMatrix_1, CommonOps.identity(tempMatrix_1.numRows, tempMatrix_1.numCols));
      double maxDeviation = CommonOps.elementMaxAbs(tempMatrix_1);
      if (maxDeviation > .01) {
        logger.severe(
            "COREGPM: maximum deviation N*inv(N) from unity = " + maxDeviation
                + ". This is larger than 0.01");
        throw new IllegalStateException("COREGPM: maximum deviation N*inv(N) from unity");
      } else if (maxDeviation > .001) {
        logger.warning(
            "COREGPM: maximum deviation N*inv(N) from unity = " + maxDeviation
                + ". This is between 0.001 and 0.01");
      }
      logger.info("TIME FOR STABILITY CHECK: {}" + stopWatch.lap("Stability Check"));

      logger.info("Coeffs in Azimuth direction: {}" + rhsL.toString());
      logger.info("Coeffs in Range direction: {}" + rhsP.toString());
      logger.info("Max Deviation: {}" + maxDeviation);
      logger.info("System Quality: {}" + solver.quality());

      /** some other stuff if the scale is okay */
      DenseMatrix64F Qe_hat = new DenseMatrix64F(numObservations, numObservations);
      DenseMatrix64F tempMatrix_2 = new DenseMatrix64F(numObservations, numUnknowns);

      CommonOps.mult(A, Qx_hat, tempMatrix_2);
      CommonOps.multTransB(-1, tempMatrix_2, A, Qe_hat);
      scaleInputDiag(Qe_hat, Qy_1);

      // solution: Azimuth
      DenseMatrix64F yL_hat = new DenseMatrix64F(numObservations, 1);
      eL_hat = new DenseMatrix64F(numObservations, 1);
      CommonOps.mult(A, rhsL, yL_hat);
      CommonOps.sub(yL_matrix, yL_hat, eL_hat);

      // solution: Range
      DenseMatrix64F yP_hat = new DenseMatrix64F(numObservations, 1);
      eP_hat = new DenseMatrix64F(numObservations, 1);
      CommonOps.mult(A, rhsP, yP_hat);
      CommonOps.sub(yP_matrix, yP_hat, eP_hat);

      logger.info("TIME FOR DATA preparation for TESTING: {}" + stopWatch.lap("Testing Setup"));

      /** overal model test (variance factor) */
      double overAllModelTest_L = 0;
      double overAllModelTest_P = 0;

      for (int i = 0; i < numObservations; i++) {
        overAllModelTest_L += FastMath.pow(eL_hat.get(i), 2) * Qy_1.get(i);
        overAllModelTest_P += FastMath.pow(eP_hat.get(i), 2) * Qy_1.get(i);
      }

      overAllModelTest_L =
          (overAllModelTest_L / FastMath.pow(SIGMA_L, 2)) / (numObservations - numUnknowns);
      overAllModelTest_P =
          (overAllModelTest_P / FastMath.pow(SIGMA_P, 2)) / (numObservations - numUnknowns);

      logger.info("Overall Model Test Lines: {}" + overAllModelTest_L);
      logger.info("Overall Model Test Pixels: {}" + overAllModelTest_P);

      logger.info("TIME FOR OMT: {}" + stopWatch.lap("OMT"));

      /** ---------------------- DATASNOPING ----------------------------------- * */
      /** Assumed Qy diag */

      /** initialize */
      DenseMatrix64F wTest_L = new DenseMatrix64F(numObservations, 1);
      DenseMatrix64F wTest_P = new DenseMatrix64F(numObservations, 1);

      for (int i = 0; i < numObservations; i++) {
        wTest_L.set(i, eL_hat.get(i) / (Math.sqrt(Qe_hat.get(i, i)) * SIGMA_L));
        wTest_P.set(i, eP_hat.get(i) / (Math.sqrt(Qe_hat.get(i, i)) * SIGMA_P));
      }

      /** find maxima's */
      // azimuth
      winL = absArgmax(wTest_L);
      double maxWinL = Math.abs(wTest_L.get(winL));
      logger.info(
          "maximum wtest statistic azimuth = " + maxWinL
              + " for window number: " + index.getQuick(winL));

      // range
      winP = absArgmax(wTest_P);
      double maxWinP = Math.abs(wTest_P.get(winP));
      logger.info(
          "maximum wtest statistic range = " + maxWinP
              + " for window number: " + index.getQuick(winP));

      /** use summed wTest in Azimuth and Range direction for outlier detection */
      DenseMatrix64F wTestSum = new DenseMatrix64F(numObservations);
      for (int i = 0; i < numObservations; i++) {
        wTestSum.set(i, FastMath.pow(wTest_L.get(i), 2) + FastMath.pow(wTest_P.get(i), 2));
      }

      maxWSum_idx = absArgmax(wTestSum); // use the summed statistic built above
      double maxWSum = wTestSum.get(maxWSum_idx);
      logger.info(
          "Detected outlier: summed sqr.wtest = " + maxWSum
              + "; observation: " + index.getQuick(maxWSum_idx));

      /** Test if we are estimationDone yet */
      // check on number of observations
      if (numObservations <= numUnknowns) {
        logger.warning("NO redundancy!  Exiting iterations.");
        estimationDone = true; // cannot remove more than this
      }

      // check on test k_alpha
      if (Math.max(maxWinL, maxWinP) <= criticalValue) {
        // all tests accepted?
        logger.info("All outlier tests accepted! (final solution computed)");
        estimationDone = true;
      }

      if (numIterations >= maxIterations) {
        logger.info("max. number of iterations reached (exiting loop).");
        estimationDone = true; // we reached max. (or no max_iter specified)
      }

      /** Only warn if last iteration has been estimationDone */
      if (estimationDone) {
        if (overAllModelTest_L > 10) {
          logger.warning(
              "COREGPM: Overall Model Test, Lines = " + overAllModelTest_L
                  + " is larger than 10. (Suggest model or a priori sigma not correct.)");
        }
        if (overAllModelTest_P > 10) {
          logger.warning(
              "COREGPM: Overall Model Test, Pixels = " + overAllModelTest_P
                  + " is larger than 10. (Suggest model or a priori sigma not correct.)");
        }

        /** if a priori sigma is correct, max wtest should be something like 1.96 */
        if (Math.max(maxWinL, maxWinP) > 200.0) {
          logger.warning(
              "Recommendation: remove window number: " + index.get(winL)
                  + " and re-run step COREGPM. max. wtest is: " + Math.max(maxWinL, maxWinP) + ".");
        }
      }

      logger.info("TIME FOR wTestStatistics: {}" + stopWatch.lap("WTEST"));
      logger.info("Total Estimation TIME: {}" + clock.getElapsedTime());

      numIterations++; // update counter here!
    } // only warn when iterating

    yError = eL_hat.getData();
    xError = eP_hat.getData();

    yCoef = rhsL.getData();
    xCoef = rhsP.getData();
  }
Code Example #21
File: Ellipsoid.java Project: fpl/s1tbx
 // first ecc.
 private static void set_ecc1st_sqr() {
   //  faster than e2=(sqr(a)-sqr(b))/sqr(a)
   e2 = 1.0 - FastMath.pow(b / a, 2);
 }
Code Example #22
  /** {@inheritDoc} */
  @Override
  public void integrate(final ExpandableStatefulODE equations, final double t)
      throws MathIllegalStateException, MathIllegalArgumentException {

    sanityChecks(equations, t);
    setEquations(equations);
    final boolean forward = t > equations.getTime();

    // create some internal working arrays
    final double[] y0 = equations.getCompleteState();
    final double[] y = y0.clone();
    final int stages = c.length + 1;
    final double[][] yDotK = new double[stages][y.length];
    final double[] yTmp = y0.clone();
    final double[] yDotTmp = new double[y.length];

    // set up an interpolator sharing the integrator arrays
    final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
    interpolator.reinitialize(
        this, yTmp, yDotK, forward, equations.getPrimaryMapper(), equations.getSecondaryMappers());
    interpolator.storeTime(equations.getTime());

    // set up integration control objects
    stepStart = equations.getTime();
    double hNew = 0;
    boolean firstTime = true;
    initIntegration(equations.getTime(), y0, t);

    // main integration loop
    isLastStep = false;
    do {

      interpolator.shift();

      // iterate over step size, ensuring local normalized error is smaller than 1
      double error = 10;
      while (error >= 1.0) {

        if (firstTime || !fsal) {
          // first stage
          computeDerivatives(stepStart, y, yDotK[0]);
        }

        if (firstTime) {
          final double[] scale = new double[mainSetDimension];
          if (vecAbsoluteTolerance == null) {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
            }
          } else {
            for (int i = 0; i < scale.length; ++i) {
              scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
            }
          }
          hNew = initializeStep(forward, getOrder(), scale, stepStart, y, yDotK[0], yTmp, yDotK[1]);
          firstTime = false;
        }

        stepSize = hNew;
        if (forward) {
          if (stepStart + stepSize >= t) {
            stepSize = t - stepStart;
          }
        } else {
          if (stepStart + stepSize <= t) {
            stepSize = t - stepStart;
          }
        }

        // next stages
        for (int k = 1; k < stages; ++k) {

          for (int j = 0; j < y0.length; ++j) {
            double sum = a[k - 1][0] * yDotK[0][j];
            for (int l = 1; l < k; ++l) {
              sum += a[k - 1][l] * yDotK[l][j];
            }
            yTmp[j] = y[j] + stepSize * sum;
          }

          computeDerivatives(stepStart + c[k - 1] * stepSize, yTmp, yDotK[k]);
        }

        // estimate the state at the end of the step
        for (int j = 0; j < y0.length; ++j) {
          double sum = b[0] * yDotK[0][j];
          for (int l = 1; l < stages; ++l) {
            sum += b[l] * yDotK[l][j];
          }
          yTmp[j] = y[j] + stepSize * sum;
        }

        // estimate the error at the end of the step
        error = estimateError(yDotK, y, yTmp, stepSize);
        if (error >= 1.0) {
          // reject the step and attempt to reduce error by stepsize control
          final double factor =
              FastMath.min(
                  maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
          hNew = filterStep(stepSize * factor, forward, false);
        }
      }

      // local error is small enough: accept the step, trigger events and step handlers
      interpolator.storeTime(stepStart + stepSize);
      System.arraycopy(yTmp, 0, y, 0, y0.length);
      System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
      stepStart = acceptStep(interpolator, y, yDotTmp, t);
      System.arraycopy(y, 0, yTmp, 0, y.length);

      if (!isLastStep) {

        // prepare next step
        interpolator.storeTime(stepStart);

        if (fsal) {
          // save the last evaluation for the next step
          System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
        }

        // stepsize control for next step
        final double factor =
            FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
        final double scaledH = stepSize * factor;
        final double nextT = stepStart + scaledH;
        final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
        hNew = filterStep(scaledH, forward, nextIsLast);

        final double filteredNextT = stepStart + hNew;
        final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
        if (filteredNextIsLast) {
          hNew = t - stepStart;
        }
      }

    } while (!isLastStep);

    // dispatch results
    equations.setTime(stepStart);
    equations.setCompleteState(y);

    resetInternalState();
  }
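Both the rejected-step branch and the accepted-step branch rescale the step by safety * error^exp, clamped to [minReduction, maxGrowth]. A tiny sketch of that clamping; the constants below are assumed, default-style values, the real ones are fields of the integrator:

  // Sketch: factor = min(maxGrowth, max(minReduction, safety * error^exp)).
  public class StepFactorSketch {
    public static void main(String[] args) {
      double safety = 0.9, minReduction = 0.2, maxGrowth = 10.0;  // assumed defaults
      double exp = -1.0 / 8.0;                                    // assumed: -1/order for an order-8 scheme
      for (double error : new double[] {0.01, 0.5, 2.0, 50.0}) {
        double factor = Math.min(maxGrowth, Math.max(minReduction, safety * Math.pow(error, exp)));
        System.out.println("error=" + error + " -> factor=" + factor);
      }
    }
  }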
Code Example #23
File: Ellipsoid.java Project: fpl/s1tbx
 // second ecc.
 private static void set_ecc2nd_sqr() {
   // faster than e2b=(sqr(a)-sqr(b))/sqr(b);
   e2b = FastMath.pow(a / b, 2) - 1.0;
 }
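This helper and the first-eccentricity helper in Code Example #21 are linked by the identity e'^2 = (a/b)^2 - 1 = e^2 / (1 - e^2). A quick sketch checking that identity with the WGS84 semi-axes:

 // Sketch: first and second eccentricity squared from the semi-axes, plus the identity between them.
 public class EccentricitySketch {
   public static void main(String[] args) {
     double a = 6378137.0;        // WGS84 semi-major axis [m]
     double b = 6356752.314245;   // WGS84 semi-minor axis [m]
     double e2  = 1.0 - Math.pow(b / a, 2);   // first eccentricity squared
     double e2b = Math.pow(a / b, 2) - 1.0;   // second eccentricity squared
     System.out.println(e2b + " ~ " + e2 / (1.0 - e2));
   }
 }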