Code example #1
 public static void assertEigen(
     Matrix eigens,
     VectorIterable corpus,
     int numEigensToCheck,
     double errorMargin,
     boolean isSymmetric) {
   for (int i = 0; i < numEigensToCheck; i++) {
     Vector e = eigens.getRow(i);
      // Skip rows that were never populated (all-zero vectors).
      if (e.getLengthSquared() == 0) {
        continue;
      }
      // Compare e with its image: A*e when the corpus is symmetric,
      // A'A*e (via timesSquared) otherwise.
      Vector afterMultiply = isSymmetric ? corpus.times(e) : corpus.timesSquared(e);
      double dot = afterMultiply.dot(e);
      double afterNorm = afterMultiply.getLengthSquared();
      // error = 1 - cos(angle between e and its image); near zero iff e is an eigenvector.
      double error = 1 - dot / Math.sqrt(afterNorm * e.getLengthSquared());
      assertTrue(
          "Error margin " + error + " too high! (for eigen " + i + ')',
          Math.abs(error) < errorMargin);
   }
 }
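The assertion above reduces to a cosine check: e passes when the angle between e and its image under the operator is nearly zero. Below is a self-contained sketch of the same metric in plain Java, using arrays instead of Mahout's Vector; the 2x2 matrix and its eigenvector are illustrative values, not taken from the original test.

public class CosineErrorSketch {
  public static void main(String[] args) {
    // Symmetric 2x2 matrix whose eigenvector (1, 1)/sqrt(2) has eigenvalue 3.
    double[][] a = {{2.0, 1.0}, {1.0, 2.0}};
    double[] e = {1.0 / Math.sqrt(2.0), 1.0 / Math.sqrt(2.0)};

    // afterMultiply = A * e
    double[] ae = new double[2];
    for (int i = 0; i < 2; i++) {
      for (int j = 0; j < 2; j++) {
        ae[i] += a[i][j] * e[j];
      }
    }

    double dot = ae[0] * e[0] + ae[1] * e[1];
    double aeNormSq = ae[0] * ae[0] + ae[1] * ae[1];
    double eNormSq = e[0] * e[0] + e[1] * e[1];

    // Same formula as assertEigen: 1 - cos(angle between e and A*e).
    double error = 1.0 - dot / Math.sqrt(aeNormSq * eNormSq);
    System.out.println("cosine error = " + error);  // prints ~0.0 for a true eigenvector
  }
}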
Code example #2
 /**
  * For the distributed case, the best guess at a useful initial state for Lanczos is a vector
  * that is uniform over all input dimensions and L_2-normalized.
  */
 public static Vector getInitialVector(VectorIterable corpus) {
   Vector initialVector = new DenseVector(corpus.numCols());
   // Every component is 1/sqrt(n), so the vector's L_2 norm is exactly 1.
   initialVector.assign(1.0 / Math.sqrt(corpus.numCols()));
   return initialVector;
 }
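A quick sanity check of that construction (plain Java, with an illustrative dimension n = 4 that is not part of the original class): every component is 1/sqrt(n), so the squared L_2 norm sums to n * (1/n) = 1.

public class InitialVectorSketch {
  public static void main(String[] args) {
    int n = 4;  // illustrative dimension
    double entry = 1.0 / Math.sqrt(n);
    double normSquared = 0.0;
    for (int i = 0; i < n; i++) {
      normSquared += entry * entry;  // n terms, each contributing 1/n
    }
    System.out.println("entry = " + entry + ", ||v||^2 = " + normSquared);  // 0.5, 1.0
  }
}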
Code example #3
  /**
   * Solves the system Ax = b, where A is a linear operator and b is a vector. Uses the specified
   * preconditioner to improve numeric stability and possibly speed convergence. This version of
   * solve() allows control over the termination and iteration parameters.
   *
   * @param a The matrix A.
   * @param b The vector b.
   * @param preconditioner The preconditioner to apply.
   * @param maxIterations The maximum number of iterations to run.
   * @param maxError The maximum amount of residual error to tolerate. The algorithm will run until
   *     the residual falls below this value or until maxIterations are completed.
   * @return The result x of solving the system.
   * @throws IllegalArgumentException if the matrix is not square, if the size of b is not equal to
   *     the number of columns of A, if maxError is less than zero, or if maxIterations is not
   *     positive.
   */
  public Vector solve(
      VectorIterable a,
      Vector b,
      Preconditioner preconditioner,
      int maxIterations,
      double maxError) {

    if (a.numRows() != a.numCols()) {
      // Only squareness is cheap to verify here; symmetry and positive
      // definiteness are preconditions the caller must guarantee.
      throw new IllegalArgumentException("Matrix must be square, symmetric and positive definite.");
    }

    if (a.numCols() != b.size()) {
      throw new CardinalityException(a.numCols(), b.size());
    }

    if (maxIterations <= 0) {
      throw new IllegalArgumentException("Max iterations must be positive.");
    }

    if (maxError < 0.0) {
      throw new IllegalArgumentException("Max error must be non-negative.");
    }

    Vector x = new DenseVector(b.size());

    iterations = 0;
    Vector residual = b.minus(a.times(x));
    residualNormSquared = residual.dot(residual);

    log.info("Conjugate gradient initial residual norm = {}", Math.sqrt(residualNormSquared));
    double previousConditionedNormSqr = 0.0;
    Vector updateDirection = null;
    while (Math.sqrt(residualNormSquared) > maxError && iterations < maxIterations) {
      Vector conditionedResidual;
      double conditionedNormSqr;
      if (preconditioner == null) {
        // No preconditioner: the conditioned residual is the residual itself.
        conditionedResidual = residual;
        conditionedNormSqr = residualNormSquared;
      } else {
        // Apply the preconditioner to the residual and use r'z for the norm.
        conditionedResidual = preconditioner.precondition(residual);
        conditionedNormSqr = residual.dot(conditionedResidual);
      }

      ++iterations;

      if (iterations == 1) {
        updateDirection = new DenseVector(conditionedResidual);
      } else {
        double beta = conditionedNormSqr / previousConditionedNormSqr;

        // updateDirection = conditionedResidual + beta * updateDirection
        updateDirection.assign(Functions.MULT, beta);
        updateDirection.assign(conditionedResidual, Functions.PLUS);
      }

      Vector aTimesUpdate = a.times(updateDirection);

      // Optimal step length along the search direction: alpha = (r'z) / (d'Ad).
      double alpha = conditionedNormSqr / updateDirection.dot(aTimesUpdate);

      // x = x + alpha * updateDirection
      PLUS_MULT.setMultiplicator(alpha);
      x.assign(updateDirection, PLUS_MULT);

      // residual = residual - alpha * A * updateDirection
      PLUS_MULT.setMultiplicator(-alpha);
      residual.assign(aTimesUpdate, PLUS_MULT);

      previousConditionedNormSqr = conditionedNormSqr;
      residualNormSquared = residual.dot(residual);

      log.info(
          "Conjugate gradient iteration {} residual norm = {}",
          iterations,
          Math.sqrt(residualNormSquared));
    }
    return x;
  }
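A minimal usage sketch for this solver on a small symmetric positive definite system. It assumes Mahout's DenseMatrix and DenseVector, and that solve() lives on org.apache.mahout.math.solver.ConjugateGradientSolver (an assumption based on the log messages; adjust the class name if yours differs). The 2x2 system is an illustrative textbook example, not taken from the original source.

import org.apache.mahout.math.DenseMatrix;
import org.apache.mahout.math.DenseVector;
import org.apache.mahout.math.Matrix;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.solver.ConjugateGradientSolver;

public class CgUsageSketch {
  public static void main(String[] args) {
    // Small symmetric positive definite system A x = b.
    Matrix a = new DenseMatrix(new double[][] {
        {4.0, 1.0},
        {1.0, 3.0}
    });
    Vector b = new DenseVector(new double[] {1.0, 2.0});

    // No preconditioner (null), at most 100 iterations, residual tolerance 1e-9.
    ConjugateGradientSolver solver = new ConjugateGradientSolver();
    Vector x = solver.solve(a, b, null, 100, 1.0e-9);

    // The exact solution is x = (1/11, 7/11), approximately (0.0909, 0.6364).
    System.out.println(x);
  }
}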