@Test public void testAddToVector() { TextValueEncoder enc = new TextValueEncoder("text"); Vector v1 = new DenseVector(200); enc.addToVector("test1 and more", v1); enc.flush(1, v1); // should set 6 distinct locations to 1 assertEquals(6.0, v1.norm(1), 0); assertEquals(1.0, v1.maxValue(), 0); // now some fancy weighting StaticWordValueEncoder w = new StaticWordValueEncoder("text"); w.setDictionary(ImmutableMap.<String, Double>of("word1", 3.0, "word2", 1.5)); enc.setWordEncoder(w); // should set 6 locations to something Vector v2 = new DenseVector(200); enc.addToVector("test1 and more", v2); enc.flush(1, v2); // this should set the same 6 locations to the same values Vector v3 = new DenseVector(200); w.addToVector("test1", v3); w.addToVector("and", v3); w.addToVector("more", v3); assertEquals(0, v3.minus(v2).norm(1), 0); // moreover, the locations set in the unweighted case should be the same as in the weighted case assertEquals(v3.zSum(), v3.dot(v1), 0); }
/**
 * Asserts that the first {@code numEigensToCheck} rows of {@code eigens} are approximate
 * eigenvectors of the corpus: for each non-zero candidate e, the cosine between e and the
 * operator applied to e must lie within {@code errorMargin} of 1. All-zero rows (never
 * populated) are skipped.
 *
 * @param eigens matrix whose rows are candidate eigenvectors
 * @param corpus the operator whose eigenvectors are being verified
 * @param numEigensToCheck number of leading rows of {@code eigens} to check
 * @param errorMargin maximum allowed deviation of the cosine from 1
 * @param isSymmetric if true the candidate is checked against corpus.times(e),
 *        otherwise against corpus.timesSquared(e)
 */
public static void assertEigen(
    Matrix eigens,
    VectorIterable corpus,
    int numEigensToCheck,
    double errorMargin,
    boolean isSymmetric) {
  for (int i = 0; i < numEigensToCheck; i++) {
    Vector e = eigens.getRow(i);
    if (e.getLengthSquared() == 0) {
      continue; // row was never filled in; nothing to verify
    }
    Vector afterMultiply = isSymmetric ? corpus.times(e) : corpus.timesSquared(e);
    double dot = afterMultiply.dot(e);
    double afterNorm = afterMultiply.getLengthSquared();
    // 1 - cos(angle between e and A*e); near zero exactly when e is an eigenvector.
    double error = 1 - dot / Math.sqrt(afterNorm * e.getLengthSquared());
    // Fixed: the message used to contain a stray unmatched '{'.
    assertTrue(
        "Error margin: " + error + " too high! (for eigen " + i + ')',
        Math.abs(error) < errorMargin);
  }
}
/**
 * Asserts that the non-zero rows of {@code currentEigens} form an approximately orthonormal
 * set: each row has a self-dot within {@code errorMargin} of 1, and every distinct pair of
 * rows has a dot product within {@code errorMargin} of 0. Pairs involving an all-zero
 * (unpopulated) row are skipped.
 */
public static void assertOrthonormal(Matrix currentEigens, double errorMargin) {
  for (int row = 0; row < currentEigens.numRows(); row++) {
    Vector u = currentEigens.getRow(row);
    // Only the lower triangle (col <= row) needs checking; dot is symmetric.
    for (int col = 0; col <= row; col++) {
      Vector v = currentEigens.getRow(col);
      if (u.norm(2) == 0 || v.norm(2) == 0) {
        continue; // one of the rows was never populated
      }
      double dot = u.dot(v);
      if (row == col) {
        assertTrue(
            "not norm 1 : " + dot + " (eigen #" + row + ')',
            Math.abs(1 - dot) < errorMargin);
      } else {
        assertTrue(
            "not orthogonal : " + dot + " (eigens " + row + ", " + col + ')',
            Math.abs(dot) < errorMargin);
      }
    }
  }
}
/** * Solves the system Ax = b, where A is a linear operator and b is a vector. Uses the specified * preconditioner to improve numeric stability and possibly speed convergence. This version of * solve() allows control over the termination and iteration parameters. * * @param a The matrix A. * @param b The vector b. * @param preconditioner The preconditioner to apply. * @param maxIterations The maximum number of iterations to run. * @param maxError The maximum amount of residual error to tolerate. The algorithm will run until * the residual falls below this value or until maxIterations are completed. * @return The result x of solving the system. * @throws IllegalArgumentException if the matrix is not square, if the size of b is not equal to * the number of columns of A, if maxError is less than zero, or if maxIterations is not * positive. */ public Vector solve( VectorIterable a, Vector b, Preconditioner preconditioner, int maxIterations, double maxError) { if (a.numRows() != a.numCols()) { throw new IllegalArgumentException("Matrix must be square, symmetric and positive definite."); } if (a.numCols() != b.size()) { throw new CardinalityException(a.numCols(), b.size()); } if (maxIterations <= 0) { throw new IllegalArgumentException("Max iterations must be positive."); } if (maxError < 0.0) { throw new IllegalArgumentException("Max error must be non-negative."); } Vector x = new DenseVector(b.size()); iterations = 0; Vector residual = b.minus(a.times(x)); residualNormSquared = residual.dot(residual); log.info("Conjugate gradient initial residual norm = {}", Math.sqrt(residualNormSquared)); double previousConditionedNormSqr = 0.0; Vector updateDirection = null; while (Math.sqrt(residualNormSquared) > maxError && iterations < maxIterations) { Vector conditionedResidual; double conditionedNormSqr; if (preconditioner == null) { conditionedResidual = residual; conditionedNormSqr = residualNormSquared; } else { conditionedResidual = preconditioner.precondition(residual); 
conditionedNormSqr = residual.dot(conditionedResidual); } ++iterations; if (iterations == 1) { updateDirection = new DenseVector(conditionedResidual); } else { double beta = conditionedNormSqr / previousConditionedNormSqr; // updateDirection = residual + beta * updateDirection updateDirection.assign(Functions.MULT, beta); updateDirection.assign(conditionedResidual, Functions.PLUS); } Vector aTimesUpdate = a.times(updateDirection); double alpha = conditionedNormSqr / updateDirection.dot(aTimesUpdate); // x = x + alpha * updateDirection PLUS_MULT.setMultiplicator(alpha); x.assign(updateDirection, PLUS_MULT); // residual = residual - alpha * A * updateDirection PLUS_MULT.setMultiplicator(-alpha); residual.assign(aTimesUpdate, PLUS_MULT); previousConditionedNormSqr = conditionedNormSqr; residualNormSquared = residual.dot(residual); log.info( "Conjugate gradient iteration {} residual norm = {}", iterations, Math.sqrt(residualNormSquared)); } return x; }
/**
 * Calculate a pdf using the supplied sample and stdDev.
 *
 * <p>Evaluates exp(-||x - mean||^2 / (2 sd^2)) / (sd * sqrt2pi), expanding the squared
 * distance as x.x - 2 x.mean + mean.mean rather than materializing x - mean.
 *
 * @param x a Vector sample
 * @param sd a double std deviation
 * @return the density value at x
 */
private double pdf(Vector x, double sd) {
  double variance = sd * sd;
  // ||x - mean||^2 via the expanded dot-product form.
  double squaredDistance = x.dot(x) - 2 * x.dot(mean) + mean.dot(mean);
  double exponent = -squaredDistance / (2 * variance);
  return Math.exp(exponent) / (sd * sqrt2pi);
}