/**
 * {@inheritDoc}
 *
 * <p>Gradient of {@code a * cos(omega * x + phi)} with respect to the three
 * parameters {@code [a, omega, phi]}, evaluated at {@code x}.
 */
public double[] gradient(double x, double[] parameters) {
  final double amplitude = parameters[0];
  final double angularFrequency = parameters[1];
  final double phase = parameters[2];
  final double argument = angularFrequency * x + phase;
  final double sine = FastMath.sin(argument);
  final double cosine = FastMath.cos(argument);
  final double[] grad = new double[3];
  grad[0] = cosine; // d/da of a*cos(omega*x + phi)
  grad[1] = -amplitude * x * sine; // d/domega
  grad[2] = -amplitude * sine; // d/dphi
  return grad;
}
/**
 * Calculates the Nth generalized harmonic number, i.e. the partial sum
 * {@code sum(1 / k^m) for k = 1..n}. See <a
 * href="http://mathworld.wolfram.com/HarmonicSeries.html">Harmonic Series</a>.
 *
 * @param n the term in the series to calculate (must be &ge; 1)
 * @param m the exponent; special case {@code m == 1.0} is the harmonic series
 * @return the nth generalized harmonic number
 */
private double generalizedHarmonic(final int n, final double m) {
  // Accumulate from k = n down to 1; the descending order is kept deliberately
  // so that floating-point results are bit-identical to the previous version.
  double sum = 0;
  int k = n;
  while (k > 0) {
    sum += 1.0 / FastMath.pow(k, m);
    --k;
  }
  return sum;
}
/**
 * Decides whether the vehicle's current state should be treated as a "null"
 * (not-in-service / untrackable) state for the particle filter.
 *
 * <p>Returns {@code NullStates.NULL_STATE} when:
 * <ul>
 *   <li>there is no block-state observation at all, or
 *   <li>the schedule deviation is exactly zero (no usable scheduled time), or
 *   <li>the vehicle is not snapped to its block and is in a phase
 *       (deadhead before/after, layover before, at base) consistent with being
 *       out of service.
 * </ul>
 * Otherwise returns {@code NullStates.NON_NULL_STATE}.
 *
 * @param context filter context carrying the current vehicle state
 * @return the null/non-null classification of the state
 */
public static NullStates getNullState(Context context) {
  final VehicleState state = context.getState();
  // final Observation obs = context.getObservation();
  final BlockStateObservation blockStateObs = state.getBlockStateObservation();
  EVehiclePhase phase = state.getJourneyState().getPhase();
  // No block assignment information: nothing to evaluate against.
  if (blockStateObs == null) {
    return NullStates.NULL_STATE;
  } else {
    // A zero schedule deviation is treated as "no scheduled time available".
    final boolean hasScheduledTime =
        FastMath.abs(state.getBlockStateObservation().getScheduleDeviation()) > 0d;
    if (!hasScheduledTime) {
      return NullStates.NULL_STATE;
    }
    // NOTE(review): at this point |scheduleDeviation| > 0, so every
    // "getScheduleDeviation() == 0d" sub-condition below appears unreachable;
    // the whole test seems to reduce to (!isSnapped() && AT_BASE). Confirm
    // whether the deviation checks were meant to guard something else.
    // NOTE(review): in the LAYOVER_BEFORE clause the parentheses close early,
    // but since && binds tighter than ||, the grouping still matches the
    // other phase clauses.
    if (!state.getBlockStateObservation().isSnapped()
        && ((EVehiclePhase.DEADHEAD_AFTER == phase
                && state.getBlockStateObservation().getScheduleDeviation() == 0d)
            || EVehiclePhase.AT_BASE == phase
            || (EVehiclePhase.DEADHEAD_BEFORE == phase
                && state.getBlockStateObservation().getScheduleDeviation() == 0d)
            || (EVehiclePhase.LAYOVER_BEFORE == phase)
                && state.getBlockStateObservation().getScheduleDeviation() == 0d)) {
      return NullStates.NULL_STATE;
    }
    return NullStates.NON_NULL_STATE;
  }
}
/**
 * The probability mass function {@code P(X = x)} for a Zipf distribution:
 * {@code (1 / x^s) / H(N, s)} where {@code H} is the generalized harmonic number.
 *
 * @param x the value at which the probability mass function is evaluated
 * @return the value of the probability mass function at x; zero outside the
 *     support {@code [1, numberOfElements]}
 */
public double probability(final int x) {
  final boolean outsideSupport = (x <= 0) || (x > numberOfElements);
  if (outsideSupport) {
    return 0.0;
  }
  final double unnormalized = 1.0 / FastMath.pow(x, exponent);
  return unnormalized / generalizedHarmonic(numberOfElements, exponent);
}
/**
 * Get a vector orthogonal to the instance.
 *
 * <p>There are an infinite number of normalized vectors orthogonal to the instance. This method
 * picks up one of them almost arbitrarily. It is useful when one needs to compute a reference
 * frame with one of the axes in a predefined direction. The following example shows how to build
 * a frame having the k axis aligned with the known vector u :
 *
 * <pre><code>
 *   Vector3D k = u.normalize();
 *   Vector3D i = k.orthogonal();
 *   Vector3D j = Vector3D.crossProduct(k, i);
 * </code></pre>
 *
 * @return a new normalized vector orthogonal to the instance
 * @exception ArithmeticException if the norm of the instance is null
 */
public Vector3D orthogonal() {
  // A coordinate whose magnitude is at most 0.6 * ||v|| cannot be the only
  // significant one, so zeroing it and rotating the other two is well-conditioned.
  final double threshold = 0.6 * getNorm();
  if (threshold == 0) {
    throw MathRuntimeException.createArithmeticException(LocalizedFormats.ZERO_NORM);
  }
  final boolean xIsSmall = (x >= -threshold) && (x <= threshold);
  if (xIsSmall) {
    final double inv = 1 / FastMath.sqrt(y * y + z * z);
    return new Vector3D(0, inv * z, -inv * y);
  }
  final boolean yIsSmall = (y >= -threshold) && (y <= threshold);
  if (yIsSmall) {
    final double inv = 1 / FastMath.sqrt(x * x + z * z);
    return new Vector3D(-inv * z, 0, inv * x);
  }
  // Both x and y are large, so z may be zeroed safely.
  final double inv = 1 / FastMath.sqrt(x * x + y * y);
  return new Vector3D(inv * y, -inv * x, 0);
}
/**
 * Returns the next pseudorandom, Gaussian ("normally") distributed {@code double} value with mean
 * {@code 0.0} and standard deviation {@code 1.0} from this random number generator's sequence.
 *
 * <p>The default implementation uses the <em>Polar Method</em> due to G.E.P. Box, M.E. Muller and
 * G. Marsaglia, as described in D. Knuth, <u>The Art of Computer Programming</u>, 3.4.1C.
 *
 * <p>The algorithm generates a pair of independent random values. One of these is cached for
 * reuse, so the full algorithm is not executed on each activation. Implementations that do not
 * override this method should make sure to call {@link #clear} to clear the cached value in the
 * implementation of {@link #setSeed(long)}.
 *
 * @return the next pseudorandom, Gaussian ("normally") distributed {@code double} value with mean
 *     {@code 0.0} and standard deviation {@code 1.0} from this random number generator's sequence
 */
public double nextGaussian() {
  // Serve the second deviate produced by the previous polar-method round, if present.
  if (!Double.isNaN(cachedNormalDeviate)) {
    final double cached = cachedNormalDeviate;
    cachedNormalDeviate = Double.NaN;
    return cached;
  }
  // Rejection-sample a point uniformly inside the unit disc (excluding the boundary).
  double u;
  double v;
  double radiusSq;
  do {
    u = 2 * nextDouble() - 1;
    v = 2 * nextDouble() - 1;
    radiusSq = u * u + v * v;
  } while (radiusSq >= 1);
  // Box-Muller-Marsaglia transform; if the point is exactly the origin the
  // scale stays 0 and both deviates are 0, avoiding log(0).
  double scale = radiusSq;
  if (radiusSq != 0) {
    scale = FastMath.sqrt(-2 * FastMath.log(radiusSq) / radiusSq);
  }
  cachedNormalDeviate = v * scale;
  return u * scale;
}
/** * Compute the angular separation between two vectors. * * <p>This method computes the angular separation between two vectors using the dot product for * well separated vectors and the cross product for almost aligned vectors. This allows to have a * good accuracy in all cases, even for vectors very close to each other. * * @param v1 first vector * @param v2 second vector * @return angular separation between v1 and v2 * @exception ArithmeticException if either vector has a null norm */ public static double angle(Vector3D v1, Vector3D v2) { double normProduct = v1.getNorm() * v2.getNorm(); if (normProduct == 0) { throw MathRuntimeException.createArithmeticException(LocalizedFormats.ZERO_NORM); } double dot = dotProduct(v1, v2); double threshold = normProduct * 0.9999; if ((dot < -threshold) || (dot > threshold)) { // the vectors are almost aligned, compute using the sine Vector3D v3 = crossProduct(v1, v2); if (dot >= 0) { return FastMath.asin(v3.getNorm() / normProduct); } return FastMath.PI - FastMath.asin(v3.getNorm() / normProduct); } // the vectors are sufficiently separated to use the cosine return FastMath.acos(dot / normProduct); }
/**
 * Runs the Bag-of-Little-Bootstraps (BLB) sum estimator over {@code sample}.
 *
 * <p>For each of {@code numberOfBags} bags: shuffle the index array, take the first
 * {@code ceil(n^bagExp)} samples as the bag, run {@code numberOfBootstraps} bootstrap
 * replications on the bag via {@link BootstrapSum}, and record the bag's sum estimate.
 * The mean of the bag estimates is stored in {@code meanSum}; the accumulated wall time of
 * the bootstrap runs is stored in {@code totalTime}.
 *
 * @param sample the raw data to estimate the sum of
 * @param samplingRate sampling rate forwarded to each {@link BootstrapSum}
 * @param bagExp exponent b in the bag size n^b (BLB typically uses b in [0.5, 1))
 * @param numberOfBags number of bags (outer subsamples)
 * @param numberOfBootstraps bootstrap replications per bag
 * @param perbootstrapTime accumulator for mean per-bootstrap runtime (out-param)
 * @param perbagTime accumulator for per-bag runtime (out-param)
 */
private void calculateBlbSum(
    double[] sample,
    double samplingRate,
    double bagExp,
    final int numberOfBags,
    final int numberOfBootstraps,
    Mean perbootstrapTime,
    Mean perbagTime) {
  bagSums = new double[numberOfBags];
  // Bag size n^b, rounded up.
  int bag_size = (int) FastMath.ceil(FastMath.pow(sample.length, bagExp));
  // Identity permutation over the sample indices; shuffled per bag to draw
  // a without-replacement subsample.
  int[] index = new int[sample.length];
  for (int ii = 0; ii < sample.length; ii++) {
    index[ii] = ii;
  }
  int[] origIndex = index.clone();
  long bootstrapTime = 0;
  Mean actualSum = new Mean();
  for (int ii = 0; ii < numberOfBags; ii++) {
    SamplingUtilities.KnuthShuffle(index);
    // The first bag_size entries of the shuffled permutation form the bag.
    double[] sampleBag = new double[bag_size];
    for (int jj = 0; jj < bag_size; jj++) {
      sampleBag[jj] = sample[index[jj]];
    }
    BootstrapSum sum =
        new BootstrapSum(sampleBag, samplingRate, numberOfBootstraps, sample.length);
    bagSums[ii] = sum.Sum();
    actualSum.increment(bagSums[ii]);
    perbootstrapTime.increment(sum.getMeanTime());
    // The last entry of getTimes() is the total time for this bag's bootstraps.
    bootstrapTime += sum.getTimes()[sum.getTimes().length - 1];
    perbagTime.increment(sum.getTimes()[sum.getTimes().length - 1]);
    // Restore the identity permutation before the next shuffle.
    index = origIndex.clone();
  }
  meanSum = actualSum.getResult();
  totalTime = bootstrapTime;
}
/**
 * Get the elevation of the vector.
 *
 * @return elevation (&delta;) of the vector, between -&pi;/2 and +&pi;/2
 * @see #Vector3D(double, double)
 */
public double getDelta() {
  final double norm = getNorm();
  return FastMath.asin(z / norm);
}
/**
 * Get the azimuth of the vector.
 *
 * @return azimuth (&alpha;) of the vector, between -&pi; and +&pi;
 * @see #Vector3D(double, double)
 */
public double getAlpha() {
  // atan2 resolves the correct quadrant and handles x == 0.
  final double azimuth = FastMath.atan2(y, x);
  return azimuth;
}
/**
 * Get the L<sub>&infin;</sub> norm for the vector.
 *
 * @return L<sub>&infin;</sub> norm for the vector
 */
public double getNormInf() {
  final double absX = FastMath.abs(x);
  final double absY = FastMath.abs(y);
  final double absZ = FastMath.abs(z);
  return FastMath.max(absX, FastMath.max(absY, absZ));
}
/**
 * Get the L<sub>2</sub> norm for the vector.
 *
 * @return euclidian norm for the vector
 */
public double getNorm() {
  final double normSq = x * x + y * y + z * z;
  return FastMath.sqrt(normSq);
}
/**
 * Get the L<sub>1</sub> norm for the vector.
 *
 * @return L<sub>1</sub> norm for the vector
 */
public double getNorm1() {
  // Accumulate |x| + |y| + |z| left-to-right, as before.
  double sum = FastMath.abs(x);
  sum += FastMath.abs(y);
  sum += FastMath.abs(z);
  return sum;
}
/**
 * Returns a <code>double</code> whose value is <tt>(this<sup>exponent</sup>)</tt>, returning the
 * result in reduced form.
 *
 * @param exponent exponent to which this <code>BigFraction</code> is to be raised.
 * @return <tt>this<sup>exponent</sup></tt>.
 */
public double pow(final double exponent) {
  // (n/d)^e == n^e / d^e; computed in double precision.
  final double poweredNumerator = FastMath.pow(numerator.doubleValue(), exponent);
  final double poweredDenominator = FastMath.pow(denominator.doubleValue(), exponent);
  return poweredNumerator / poweredDenominator;
}
/**
 * Unwraps {@code wrappedPhase} into {@code unwrappedPhase} using Costantini's
 * minimum-cost-flow formulation: the wrapped partial derivatives are computed,
 * integer "jump" corrections are found by solving an integer linear program,
 * and the corrected derivatives are integrated back into an unwrapped phase.
 * Equation numbers in the comments refer to Costantini's paper.
 *
 * @throws LPException if the linear-program solver fails
 * @throws IllegalArgumentException if the input is not a 2D matrix of size at least 2x2
 */
private void costantiniUnwrap() throws LPException {
  final int ny = wrappedPhase.rows - 1; // start from Zero!
  final int nx = wrappedPhase.columns - 1; // start from Zero!
  if (wrappedPhase.isVector()) throw new IllegalArgumentException("Input must be 2D array");
  if (wrappedPhase.rows < 2 || wrappedPhase.columns < 2)
    throw new IllegalArgumentException("Size of input must be larger than 2");
  // Default weight: uniform, halved on the boundary rows/columns (outer product of w1 and w2).
  DoubleMatrix w1 = DoubleMatrix.ones(ny + 1, 1);
  w1.put(0, 0.5);
  w1.put(w1.length - 1, 0.5);
  DoubleMatrix w2 = DoubleMatrix.ones(1, nx + 1);
  w2.put(0, 0.5);
  w2.put(w2.length - 1, 0.5);
  DoubleMatrix weight = w1.mmul(w2);
  DoubleMatrix i, j, I_J, IP1_J, I_JP1;
  DoubleMatrix Psi1, Psi2;
  DoubleMatrix[] ROWS;
  // Compute partial derivative Psi1 (row direction), eqt (1,3): wrapped difference
  // between vertically adjacent pixels.
  i = intRangeDoubleMatrix(0, ny - 1);
  j = intRangeDoubleMatrix(0, nx);
  ROWS = grid2D(i, j);
  I_J = JblasUtils.sub2ind(wrappedPhase.rows, ROWS[0], ROWS[1]);
  IP1_J = JblasUtils.sub2ind(wrappedPhase.rows, ROWS[0].add(1), ROWS[1]);
  Psi1 =
      JblasUtils.getMatrixFromIdx(wrappedPhase, IP1_J)
          .sub(JblasUtils.getMatrixFromIdx(wrappedPhase, I_J));
  Psi1 = UnwrapUtils.wrapDoubleMatrix(Psi1);
  // Compute partial derivative Psi2 (column direction), eqt (2,4): wrapped difference
  // between horizontally adjacent pixels.
  i = intRangeDoubleMatrix(0, ny);
  j = intRangeDoubleMatrix(0, nx - 1);
  ROWS = grid2D(i, j);
  I_J = JblasUtils.sub2ind(wrappedPhase.rows, ROWS[0], ROWS[1]);
  I_JP1 = JblasUtils.sub2ind(wrappedPhase.rows, ROWS[0], ROWS[1].add(1));
  Psi2 =
      JblasUtils.getMatrixFromIdx(wrappedPhase, I_JP1)
          .sub(JblasUtils.getMatrixFromIdx(wrappedPhase, I_J));
  Psi2 = UnwrapUtils.wrapDoubleMatrix(Psi2);
  // Compute beq: the per-loop residues (curl of the wrapped gradient), rounded to integers.
  DoubleMatrix beq = DoubleMatrix.zeros(ny, nx);
  i = intRangeDoubleMatrix(0, ny - 1);
  j = intRangeDoubleMatrix(0, nx - 1);
  ROWS = grid2D(i, j);
  I_J = JblasUtils.sub2ind(Psi1.rows, ROWS[0], ROWS[1]);
  I_JP1 = JblasUtils.sub2ind(Psi1.rows, ROWS[0], ROWS[1].add(1));
  beq.addi(JblasUtils.getMatrixFromIdx(Psi1, I_JP1).sub(JblasUtils.getMatrixFromIdx(Psi1, I_J)));
  I_J = JblasUtils.sub2ind(Psi2.rows, ROWS[0], ROWS[1]);
  I_JP1 = JblasUtils.sub2ind(Psi2.rows, ROWS[0].add(1), ROWS[1]);
  beq.subi(JblasUtils.getMatrixFromIdx(Psi2, I_JP1).sub(JblasUtils.getMatrixFromIdx(Psi2, I_J)));
  beq.muli(-1 / (2 * Constants._PI));
  for (int k = 0; k < beq.length; k++) {
    beq.put(k, Math.round(beq.get(k)));
  }
  beq.reshape(beq.length, 1);
  logger.debug("Constraint matrix");
  i = intRangeDoubleMatrix(0, ny - 1);
  j = intRangeDoubleMatrix(0, nx - 1);
  ROWS = grid2D(i, j);
  DoubleMatrix ROW_I_J = JblasUtils.sub2ind(i.length, ROWS[0], ROWS[1]);
  int nS0 = nx * ny;
  // Used by S1p, S1m (row-direction flow variables).
  DoubleMatrix[] COLS;
  COLS = grid2D(i, j);
  DoubleMatrix COL_IJ_1 = JblasUtils.sub2ind(i.length, COLS[0], COLS[1]);
  COLS = grid2D(i, j.add(1));
  DoubleMatrix COL_I_JP1 = JblasUtils.sub2ind(i.length, COLS[0], COLS[1]);
  int nS1 = (nx + 1) * (ny);
  // Used by S2p, S2m (column-direction flow variables).
  COLS = grid2D(i, j);
  DoubleMatrix COL_IJ_2 = JblasUtils.sub2ind(i.length + 1, COLS[0], COLS[1]);
  COLS = grid2D(i.add(1), j);
  DoubleMatrix COL_IP1_J = JblasUtils.sub2ind(i.length + 1, COLS[0], COLS[1]);
  int nS2 = nx * (ny + 1);
  // Equality constraint matrix (Aeq); MATLAB reference formulation:
  /*
     S1p = + sparse(ROW_I_J, COL_I_JP1,1,nS0,nS1) ...
           - sparse(ROW_I_J, COL_IJ_1,1,nS0,nS1);
     S1m = -S1p;
     S2p = - sparse(ROW_I_J, COL_IP1_J,1,nS0,nS2) ...
           + sparse(ROW_I_J, COL_IJ_2,1,nS0,nS2);
     S2m = -S2p;
  */
  // ToDo: Aeq matrix should be sparse from its initialization, look into Jblas matrix factory
  // for how-to; otherwise even a data set of e.g. 40x40 pixels will exhaust heap:
  // ... dimension of Aeq (equality constraints) matrix for 30x30 input is 1521x6240
  // ... dimension of Aeq matrix for 50x50 input is 2401x9800
  // ... dimension of Aeq matrix for 512x512 input is 261121x1046528
  DoubleMatrix S1p =
      JblasUtils.setUpMatrixFromIdx(nS0, nS1, ROW_I_J, COL_I_JP1)
          .sub(JblasUtils.setUpMatrixFromIdx(nS0, nS1, ROW_I_J, COL_IJ_1));
  DoubleMatrix S1m = S1p.neg();
  DoubleMatrix S2p =
      JblasUtils.setUpMatrixFromIdx(nS0, nS2, ROW_I_J, COL_IP1_J)
          .neg()
          .add(JblasUtils.setUpMatrixFromIdx(nS0, nS2, ROW_I_J, COL_IJ_2));
  DoubleMatrix S2m = S2p.neg();
  DoubleMatrix Aeq =
      concatHorizontally(concatHorizontally(S1p, S1m), concatHorizontally(S2p, S2m));
  final int nObs = Aeq.columns;
  final int nUnkn = Aeq.rows;
  // Cost vector: boundary-halved weights, duplicated for the +/- flow split.
  DoubleMatrix c1 = JblasUtils.getMatrixFromRange(0, ny, 0, weight.columns, weight);
  DoubleMatrix c2 = JblasUtils.getMatrixFromRange(0, weight.rows, 0, nx, weight);
  c1.reshape(c1.length, 1);
  c2.reshape(c2.length, 1);
  DoubleMatrix cost =
      DoubleMatrix.concatVertically(
          DoubleMatrix.concatVertically(c1, c1), DoubleMatrix.concatVertically(c2, c2));
  logger.debug("Minimum network flow resolution");
  StopWatch clockLP = new StopWatch();
  LinearProgram lp = new LinearProgram(cost.data);
  lp.setMinProblem(true);
  boolean[] integerBool = new boolean[nObs];
  double[] lowerBound = new double[nObs];
  double[] upperBound = new double[nObs];
  for (int k = 0; k < nUnkn; k++) {
    lp.addConstraint(new LinearEqualsConstraint(Aeq.getRow(k).toArray(), beq.get(k), "cost"));
  }
  for (int k = 0; k < nObs; k++) {
    integerBool[k] = true;
    lowerBound[k] = 0;
    upperBound[k] = 99999; // NOTE(review): arbitrary large cap on flow variables — confirm bound
  }
  // Set up bounds and integer nature of the unknowns.
  lp.setIsinteger(integerBool);
  lp.setUpperbound(upperBound);
  lp.setLowerbound(lowerBound);
  LinearProgramSolver solver = SolverFactory.newDefault();
  // double[] solution;
  // solution = solver.solve(lp);
  DoubleMatrix solution = new DoubleMatrix(solver.solve(lp));
  clockLP.stop();
  logger.debug("Total GLPK time: {} [sec]", (double) (clockLP.getElapsedTime()) / 1000);
  // Dispatch the LP solution into the four flow components x1+/x1-/x2+/x2-.
  int offset;
  int[] idx1p = JblasUtils.intRangeIntArray(0, nS1 - 1);
  DoubleMatrix x1p = solution.get(idx1p);
  x1p.reshape(ny, nx + 1);
  offset = idx1p[nS1 - 1] + 1;
  int[] idx1m = JblasUtils.intRangeIntArray(offset, offset + nS1 - 1);
  DoubleMatrix x1m = solution.get(idx1m);
  x1m.reshape(ny, nx + 1);
  offset = idx1m[idx1m.length - 1] + 1;
  int[] idx2p = JblasUtils.intRangeIntArray(offset, offset + nS2 - 1);
  DoubleMatrix x2p = solution.get(idx2p);
  x2p.reshape(ny + 1, nx);
  offset = idx2p[idx2p.length - 1] + 1;
  int[] idx2m = JblasUtils.intRangeIntArray(offset, offset + nS2 - 1);
  DoubleMatrix x2m = solution.get(idx2m);
  x2m.reshape(ny + 1, nx);
  // Compute the derivative jumps, eqt (20,21).
  DoubleMatrix k1 = x1p.sub(x1m);
  DoubleMatrix k2 = x2p.sub(x2m);
  // (?) Round to integer solution.
  // NOTE(review): despite the comment, this truncates with floor() rather than rounding —
  // confirm whether Math.round semantics were intended here.
  if (roundK == true) {
    for (int idx = 0; idx < k1.length; idx++) {
      k1.put(idx, FastMath.floor(k1.get(idx)));
    }
    for (int idx = 0; idx < k2.length; idx++) {
      k2.put(idx, FastMath.floor(k2.get(idx)));
    }
  }
  // Sum the jumps with the wrapped partial derivatives, eqt (10,11).
  k1.reshape(ny, nx + 1);
  k2.reshape(ny + 1, nx);
  k1.addi(Psi1.div(Constants._TWO_PI));
  k2.addi(Psi2.div(Constants._TWO_PI));
  // Integrate the partial derivatives, eqt (6).
  // cumsum() method in JblasTester -> see cumsum_demo() in JblasTester.cumsum_demo()
  DoubleMatrix k2_temp = DoubleMatrix.concatHorizontally(DoubleMatrix.zeros(1), k2.getRow(0));
  k2_temp = JblasUtils.cumsum(k2_temp, 1);
  DoubleMatrix k = DoubleMatrix.concatVertically(k2_temp, k1);
  k = JblasUtils.cumsum(k, 1);
  // Unwrap - final solution: scale the integrated cycle counts back to radians.
  unwrappedPhase = k.mul(Constants._TWO_PI);
}
/**
 * Compute the distance between two vectors according to the L<sub>2</sub> norm.
 *
 * <p>Calling this method is equivalent to calling: <code>v1.subtract(v2).getNorm()</code> except
 * that no intermediate vector is built
 *
 * @param v1 first vector
 * @param v2 second vector
 * @return the distance between v1 and v2 according to the L<sub>2</sub> norm
 */
public static double distance(Vector3D v1, Vector3D v2) {
  final double deltaX = v2.x - v1.x;
  final double deltaY = v2.y - v1.y;
  final double deltaZ = v2.z - v1.z;
  final double distSq = deltaX * deltaX + deltaY * deltaY + deltaZ * deltaZ;
  return FastMath.sqrt(distSq);
}
public class DenseFeatureMatrix { int inputSize; int outputSize; INDArray features; INDArray featuresT; GradientStore gradientStore = new GradientStore(); double l2 = GlobalParameters.l2regularizerLambdaDefault; double learningRate = GlobalParameters.learningRateDefault; // adagrad vars boolean useAdagrad = GlobalParameters.useAdagradDefault; INDArray adagradQuotient; double adagradEps = 0.001; double adagradMax = 10; // gaussian noise double noiseVar = GlobalParameters.noiseDevDefault; double noiseVarSqrt = FastMath.sqrt(noiseVar);; HashMap<Integer, INDArray> currentNoise = new HashMap<Integer, INDArray>(); // momentum vars boolean useMomentum = GlobalParameters.useMomentumDefault; INDArray momentumPrevUpdate; double momentum = GlobalParameters.momentumDefault; // adadelta vars boolean useAdadelta = GlobalParameters.useAdadeltaDefault; INDArray adadeltaRMSGradient; INDArray adadeltaRMSUpdate; double adadeltaMomentum = GlobalParameters.adadeltaMomentumDefault; double adadeltaEps = GlobalParameters.adadeltaEpsDefault; // commit int commitMethod = GlobalParameters.commitMethodDefault; public DenseFeatureMatrix( int inputSize, int outputSize, boolean useAdagrad, boolean useMomentum, boolean useAdadelta) { if (inputSize == 1) { throw new RuntimeException("input size = 1: use vector instead"); } this.inputSize = inputSize; this.outputSize = outputSize; this.useAdadelta = useAdadelta; this.useAdagrad = useAdagrad; this.useMomentum = useMomentum; if (useAdagrad) { adagradQuotient = Nd4j.zeros(inputSize, outputSize); adagradQuotient.addi(adagradEps); } if (useMomentum) { momentumPrevUpdate = Nd4j.zeros(inputSize, outputSize); } if (useAdadelta) { adadeltaRMSGradient = Nd4j.zeros(inputSize, outputSize); adadeltaRMSUpdate = Nd4j.zeros(inputSize, outputSize); } } public DenseFeatureMatrix(int inputSize, int outputSize) { if (inputSize == 1) { throw new RuntimeException("input size = 1: use vector instead"); } this.inputSize = inputSize; this.outputSize = outputSize; if 
(useAdagrad) { adagradQuotient = Nd4j.zeros(inputSize, outputSize); adagradQuotient.addi(adagradEps); } if (useMomentum) { momentumPrevUpdate = Nd4j.zeros(inputSize, outputSize); } if (useAdadelta) { adadeltaRMSGradient = Nd4j.zeros(inputSize, outputSize); adadeltaRMSUpdate = Nd4j.zeros(inputSize, outputSize); } } public void initialize(double[][] vals) { features = Nd4j.create(vals); featuresT = features.transpose(); } public void initializeUniform(double min, double max) { double[][] featuresMatrixStub = new double[inputSize][outputSize]; RandomUtils.initializeRandomMatrix(featuresMatrixStub, min, max, 1); features = Nd4j.create(featuresMatrixStub); featuresT = features.transpose(); } public void normalizedInitializationHtan(int fanin, int fanout) { double max = Math.sqrt(6.0d / (fanout + fanin)); double[][] featuresMatrixStub = new double[inputSize][outputSize]; RandomUtils.initializeRandomMatrix(featuresMatrixStub, -max, max, 1); features = Nd4j.create(featuresMatrixStub); featuresT = features.transpose(); } public void normalizedInitializationSigmoid(int fanin, int fanout) { double max = 4 * Math.sqrt(6.0d / (fanin + fanout)); double[][] featuresMatrixStub = new double[inputSize][outputSize]; RandomUtils.initializeRandomMatrix(featuresMatrixStub, -max, max, 1); features = Nd4j.create(featuresMatrixStub); featuresT = features.transpose(); } public INDArray getWeights() { return features; } public INDArray getTranspose() { return featuresT; } public void storeGradients(int processId, INDArray gradient) { gradientStore.addGradient(processId, gradient); } public void storeInputsAndOutputs(int id, INDArray x, INDArray yGrad) { gradientStore.addInputAndOutput(id, x, yGrad); } public void checkinGradients(int id) { gradientStore.computeGradientAndAdd(id); } public void update() { INDArray gradient = null; if (commitMethod == 0) { gradient = gradientStore.getGradientAvg(); } else { gradient = gradientStore.getGradientSum(); } if (gradient == null) return; INDArray 
gradientL2 = gradient.sub(features.mul(l2)); if (useAdagrad) { getAdagradGradient(gradientL2); features.addi(gradientL2.mul(learningRate)); } else if (useMomentum) { getMomentumGradient(gradientL2); features.addi(gradientL2.mul(learningRate)); } else if (useAdadelta) { getAdadeltaGradient(gradientL2); features.addi(gradientL2); } else { features.addi(gradientL2.mul(learningRate)); } capValues(GlobalParameters.maxVal); featuresT = features.transpose(); gradientStore.init(); } protected void getAdagradGradient(INDArray gradient) { adagradQuotient.addi(gradient.mul(gradient)); for (int i = 0; i < inputSize; i++) { for (int j = 0; j < outputSize; j++) { double adagradQ = adagradQuotient.getDouble(i, j); if (adagradMax < adagradQ) { adagradQuotient.putScalar(new int[] {i, j}, adagradMax); adagradQ = adagradMax; } gradient.putScalar(new int[] {i, j}, gradient.getDouble(i, j) / Math.sqrt(adagradQ)); } } } protected void getAdadeltaGradient(INDArray gradient) { adadeltaRMSGradient = adadeltaRMSGradient .mul(adadeltaMomentum) .add(gradient.mul(gradient).mul(1 - adadeltaMomentum)); gradient.muli( Transforms.sqrt(adadeltaRMSUpdate.add(adadeltaEps)) .div(Transforms.sqrt(adadeltaRMSGradient.add(adadeltaEps)))); adadeltaRMSUpdate.mul(adadeltaMomentum).add(gradient.mul(gradient).mul(1 - adadeltaMomentum)); } protected void getMomentumGradient(INDArray gradient) { INDArray momemtumUpdate = momentumPrevUpdate.mul(momentum); gradient.addi(momemtumUpdate); momentumPrevUpdate = gradient.dup(); } public INDArray genGaussianNoise(int id) { if (!currentNoise.containsKey(id)) { INDArray zeroMean = Nd4j.zeros(inputSize, outputSize); currentNoise.put(id, Sampling.normal(RandomUtils.getRandomGenerator(id), zeroMean, noiseVar)); } else { RealDistribution reals = new NormalDistribution( RandomUtils.getRandomGenerator(id), 0, noiseVarSqrt, NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY); INDArrayUtils.shiftLeft( currentNoise.get(id), inputSize, outputSize, 
RandomUtils.getRandomGenerator(id).nextInt(inputSize * outputSize), reals.sample()); } // currentNoise = Sampling.normal(RandomUtils.getRandomGenerator(id), zeroMean, noiseVar); return currentNoise.get(id); } public void capValues(double max) { INDArray linear = features.linearView(); for (int i = 0; i < linear.size(0); i++) { linear.putScalar(i, Math.max(-max, Math.min(max, linear.getDouble(i)))); } } public void save(PrintStream out) { out.println(inputSize); out.println(outputSize); out.println(useAdagrad); out.println(adagradEps); out.println(adagradMax); out.println(noiseVar); out.println(useMomentum); out.println(momentum); out.println(useAdadelta); out.println(adadeltaEps); out.println(adadeltaMomentum); saveMatrix(features, out); if (useAdagrad) { saveMatrix(adagradQuotient, out); } if (useMomentum) { saveMatrix(momentumPrevUpdate, out); } if (useAdadelta) { saveMatrix(adadeltaRMSGradient, out); saveMatrix(adadeltaRMSUpdate, out); } } public void saveMatrix(INDArray matrix, PrintStream out) { for (int row = 0; row < inputSize; row++) { for (int col = 0; col < outputSize; col++) { double val = matrix.getDouble(row, col); if (col < outputSize - 1) { out.print(val + " "); } else { out.println(val); } } } } public static DenseFeatureMatrix load(BufferedReader in) { try { int inputSize = Integer.parseInt(in.readLine()); int outputSize = Integer.parseInt(in.readLine()); DenseFeatureMatrix matrix = new DenseFeatureMatrix(inputSize, outputSize); matrix.useAdagrad = Boolean.parseBoolean(in.readLine()); matrix.adagradEps = Double.parseDouble(in.readLine()); matrix.adagradMax = Double.parseDouble(in.readLine()); matrix.noiseVar = Double.parseDouble(in.readLine()); matrix.useMomentum = Boolean.parseBoolean(in.readLine()); matrix.momentum = Double.parseDouble(in.readLine()); matrix.useAdadelta = Boolean.parseBoolean(in.readLine()); matrix.adadeltaEps = Double.parseDouble(in.readLine()); matrix.adadeltaMomentum = Double.parseDouble(in.readLine()); matrix.features = 
loadMatrix(in, inputSize, outputSize); if (matrix.useAdagrad) { matrix.adagradQuotient = loadMatrix(in, inputSize, outputSize); } if (matrix.useMomentum) { matrix.momentumPrevUpdate = loadMatrix(in, inputSize, outputSize); } if (matrix.useAdadelta) { matrix.adadeltaRMSGradient = loadMatrix(in, inputSize, outputSize); matrix.adadeltaRMSUpdate = loadMatrix(in, inputSize, outputSize); } matrix.featuresT = matrix.features.transpose(); return matrix; } catch (Exception e) { throw new RuntimeException(e); } } public static INDArray loadMatrix(BufferedReader in, int inputSize, int outputSize) throws IOException { INDArray matrix = Nd4j.create(inputSize, outputSize); for (int row = 0; row < inputSize; row++) { String[] vals = in.readLine().split("\\s+"); for (int col = 0; col < outputSize; col++) { matrix.putScalar(new int[] {row, col}, Double.parseDouble(vals[col])); } } return matrix; } public void setL2(double l2) { this.l2 = l2; } public static void main(String[] args) { DenseFeatureMatrix matrix = new DenseFeatureMatrix(1, 5); matrix.initializeUniform(-0.1, 0.1); matrix.save(IOUtils.getPrintStream("/tmp/file")); DenseFeatureMatrix.load(IOUtils.getReader("/tmp/file")).save(System.err); } public void normalize() { features.divi(features.sum(0).getDouble(0) * inputSize); featuresT = features.transpose(); } }
/**
 * {@inheritDoc}
 *
 * <p>Evaluates the harmonic oscillator {@code a * cos(omega * x + phi)} with parameters
 * {@code [a, omega, phi]}.
 */
public double value(double x, double[] parameters) {
  final double amplitude = parameters[0];
  final double angularFrequency = parameters[1];
  final double phase = parameters[2];
  final double argument = angularFrequency * x + phase;
  return amplitude * FastMath.cos(argument);
}
/**
 * Demo / scratch driver for voice-leading distance measures between chords.
 *
 * <p>First computes a hand-rolled "smoothness" value (L1 distance over the pitch slots that
 * are non-zero in both chords), then compares two 4-note chords under the L2 (Euclidean),
 * L1 (taxicab/smoothness), and L-infinity metrics, printing each result.
 */
public static void main(String[] args) {
  TreeMap<Double, List<int[]>> voiceLeadingMap = new TreeMap<Double, List<int[]>>();
  // NOTE(review): a large commented-out experiment lived here — it enumerated 2- and 3-voice
  // voice leadings, grouped them in voiceLeadingMap by rounded analyseVoiceLeading() degree,
  // and printed each group. Restore from version control if needed.
  double[] firstChord = {60.0, 50.0, 60.0};
  double[] secondChord = {60.0, 55.0, 0.0};
  // L1 distance restricted to slots where both chords have a sounding pitch
  // (a 0.0 entry is treated as "no note" and skipped).
  double sum = 0;
  for (int i = 0; i < firstChord.length; i++) {
    if (firstChord[i] != 0 && secondChord[i] != 0) {
      sum += FastMath.abs(firstChord[i] - secondChord[i]);
    }
  }
  // double smoothnes = MathUtils.distance1(vec1,vec2);
  System.out.println("smoothness: " + sum);
  Note[] chord1 = new Note[4];
  chord1[0] = new Note(60, 1.0);
  chord1[1] = new Note(60, 1.0);
  chord1[2] = new Note(60, 1.0);
  chord1[3] = new Note(60, 1.0);
  Note[] chord2 = new Note[4];
  chord2[0] = new Note(60, 1.0);
  chord2[1] = new Note(70, 1.0);
  chord2[2] = new Note(70, 1.0);
  chord2[3] = new Note(60, 1.0);
  // Calculates the L2 (Euclidean) distance between two points.
  double euclideanDistance = euclideanDistance(chord1, chord2);
  System.out.println("euclideanDistance: " + euclideanDistance);
  // Smoothness: Calculates the L1 (sum of abs) distance between two points.
  double smoothness = taxiCab(chord1, chord2);
  System.out.println("smoothness: " + smoothness);
  // Calculates the L infinite (max of abs) distance between two points.
  double lInfDistance = infiniteDistance(chord1, chord2);
  System.out.println("lInfDistance: " + lInfDistance);
}
/**
 * Compute the distance between two vectors according to the L<sub>1</sub> norm.
 *
 * <p>Equivalent to {@code v1.subtract(v2).getNorm1()} but allocates no intermediate
 * vector.
 *
 * @param v1 first vector
 * @param v2 second vector
 * @return the distance between v1 and v2 according to the L<sub>1</sub> norm
 */
public static double distance1(Vector3D v1, Vector3D v2) {
    // Sum of per-component absolute differences.
    return FastMath.abs(v2.x - v1.x)
        + FastMath.abs(v2.y - v1.y)
        + FastMath.abs(v2.z - v1.z);
}
/**
 * Create a fraction given the double value and either the maximum error allowed or the maximum
 * number of denominator digits.
 *
 * <p>NOTE: This constructor is called with EITHER - a valid epsilon value and the maxDenominator
 * set to Integer.MAX_VALUE (that way the maxDenominator has no effect). OR - a valid
 * maxDenominator value and the epsilon value set to zero (that way epsilon only has effect if
 * there is an exact match before the maxDenominator value is reached).
 *
 * <p>It has been done this way so that the same code can be (re)used for both scenarios. However
 * this could be confusing to users if it were part of the public API and this constructor should
 * therefore remain PRIVATE. See JIRA issue ticket MATH-181 for more details:
 *
 * <p>https://issues.apache.org/jira/browse/MATH-181
 *
 * @param value the double value to convert to a fraction.
 * @param epsilon maximum error allowed. The resulting fraction is within <code>epsilon</code> of
 *     <code>value</code>, in absolute terms.
 * @param maxDenominator maximum denominator value allowed.
 * @param maxIterations maximum number of convergents.
 * @throws FractionConversionException if the continued fraction failed to converge.
 */
private BigFraction(
    final double value, final double epsilon, final int maxDenominator, int maxIterations)
    throws FractionConversionException {
  // Numerator/denominator must each fit in an int for the downstream consumers,
  // even though the intermediate arithmetic is done in longs.
  long overflow = Integer.MAX_VALUE;
  double r0 = value;
  long a0 = (long) FastMath.floor(r0);
  if (a0 > overflow) {
    // Integer part alone already exceeds the representable range.
    throw new FractionConversionException(value, a0, 1l);
  }

  // check for (almost) integer arguments, which should not go
  // to iterations.
  // NOTE(review): when epsilon == 0 (maxDenominator mode) an exactly-integral
  // value fails this strict `< epsilon` test and falls through to the loop,
  // where r0 - a0 == 0 makes r1 infinite — confirm intended handling upstream.
  if (FastMath.abs(a0 - value) < epsilon) {
    numerator = BigInteger.valueOf(a0);
    denominator = BigInteger.ONE;
    return;
  }

  // Continued-fraction expansion: (p0/q0), (p1/q1) are the two previous
  // convergents, (p2/q2) the current one, built from partial quotient a1.
  long p0 = 1;
  long q0 = 0;
  long p1 = a0;
  long q1 = 1;
  long p2 = 0;
  long q2 = 1;
  int n = 0;
  boolean stop = false;
  do {
    ++n;
    // Next partial quotient from the fractional remainder.
    final double r1 = 1.0 / (r0 - a0);
    final long a1 = (long) FastMath.floor(r1);
    // Standard convergent recurrence: p2 = a1*p1 + p0, q2 = a1*q1 + q0.
    p2 = (a1 * p1) + p0;
    q2 = (a1 * q1) + q0;
    if ((p2 > overflow) || (q2 > overflow)) {
      // Convergent no longer fits in an int.
      throw new FractionConversionException(value, p2, q2);
    }
    final double convergent = (double) p2 / (double) q2;
    // Keep iterating while we have iterations left, are not yet within
    // epsilon, and the denominator is still under the cap.
    if ((n < maxIterations)
        && (FastMath.abs(convergent - value) > epsilon)
        && (q2 < maxDenominator)) {
      // Shift the window: current convergent becomes the previous one.
      p0 = p1;
      p1 = p2;
      q0 = q1;
      q1 = q2;
      a0 = a1;
      r0 = r1;
    } else {
      stop = true;
    }
  } while (!stop);

  if (n >= maxIterations) {
    throw new FractionConversionException(value, maxIterations);
  }

  // If we stopped because of accuracy, use the last convergent; if we stopped
  // because the denominator cap was hit, fall back to the previous one.
  if (q2 < maxDenominator) {
    numerator = BigInteger.valueOf(p2);
    denominator = BigInteger.valueOf(q2);
  } else {
    numerator = BigInteger.valueOf(p1);
    denominator = BigInteger.valueOf(q1);
  }
}
/**
 * Compute the distance between two vectors according to the L<sub>&#8734;</sub> norm.
 *
 * <p>Equivalent to {@code v1.subtract(v2).getNormInf()} but allocates no intermediate
 * vector.
 *
 * @param v1 first vector
 * @param v2 second vector
 * @return the distance between v1 and v2 according to the L<sub>&#8734;</sub> norm
 */
public static double distanceInf(Vector3D v1, Vector3D v2) {
    // Largest per-component absolute difference.
    final double deltaX = FastMath.abs(v2.x - v1.x);
    final double deltaY = FastMath.abs(v2.y - v1.y);
    final double deltaZ = FastMath.abs(v2.z - v1.z);
    return FastMath.max(deltaX, FastMath.max(deltaY, deltaZ));
}
/**
 * {@inheritDoc}
 *
 * <p>Adaptive embedded Runge-Kutta loop: each step is attempted, its normalized local
 * error estimated, and the step size shrunk and retried until the error drops below 1;
 * accepted steps then feed the step-size controller for the next step.
 */
@Override
public void integrate(final ExpandableStatefulODE equations, final double t)
    throws MathIllegalStateException, MathIllegalArgumentException {
  sanityChecks(equations, t);
  setEquations(equations);
  // Integration direction: forward if the target time lies after the start time.
  final boolean forward = t > equations.getTime();

  // create some internal working arrays
  final double[] y0 = equations.getCompleteState();
  final double[] y = y0.clone();
  // Number of Runge-Kutta stages is one more than the number of interior abscissae c[].
  final int stages = c.length + 1;
  final double[][] yDotK = new double[stages][y.length];
  final double[] yTmp = y0.clone();
  final double[] yDotTmp = new double[y.length];

  // set up an interpolator sharing the integrator arrays
  final RungeKuttaStepInterpolator interpolator = (RungeKuttaStepInterpolator) prototype.copy();
  interpolator.reinitialize(
      this, yTmp, yDotK, forward, equations.getPrimaryMapper(), equations.getSecondaryMappers());
  interpolator.storeTime(equations.getTime());

  // set up integration control objects
  stepStart = equations.getTime();
  double hNew = 0;
  boolean firstTime = true;
  initIntegration(equations.getTime(), y0, t);

  // main integration loop
  isLastStep = false;
  do {
    interpolator.shift();

    // iterate over step size, ensuring local normalized error is smaller than 1
    double error = 10;
    while (error >= 1.0) {
      if (firstTime || !fsal) {
        // first stage
        // (with FSAL schemes the first stage is reused from the previous step's last stage)
        computeDerivatives(stepStart, y, yDotK[0]);
      }

      if (firstTime) {
        // Build the per-component error scale from absolute/relative tolerances
        // and use it to pick an initial step size.
        final double[] scale = new double[mainSetDimension];
        if (vecAbsoluteTolerance == null) {
          for (int i = 0; i < scale.length; ++i) {
            scale[i] = scalAbsoluteTolerance + scalRelativeTolerance * FastMath.abs(y[i]);
          }
        } else {
          for (int i = 0; i < scale.length; ++i) {
            scale[i] = vecAbsoluteTolerance[i] + vecRelativeTolerance[i] * FastMath.abs(y[i]);
          }
        }
        hNew = initializeStep(forward, getOrder(), scale, stepStart, y, yDotK[0], yTmp, yDotK[1]);
        firstTime = false;
      }

      stepSize = hNew;
      // Clip the step so we never integrate past the target time t.
      if (forward) {
        if (stepStart + stepSize >= t) {
          stepSize = t - stepStart;
        }
      } else {
        if (stepStart + stepSize <= t) {
          stepSize = t - stepStart;
        }
      }

      // next stages
      // Stage k combines the previous stage derivatives with the Butcher tableau
      // coefficients a[][] and evaluates the derivative at abscissa c[k-1].
      for (int k = 1; k < stages; ++k) {
        for (int j = 0; j < y0.length; ++j) {
          double sum = a[k - 1][0] * yDotK[0][j];
          for (int l = 1; l < k; ++l) {
            sum += a[k - 1][l] * yDotK[l][j];
          }
          yTmp[j] = y[j] + stepSize * sum;
        }
        computeDerivatives(stepStart + c[k - 1] * stepSize, yTmp, yDotK[k]);
      }

      // estimate the state at the end of the step
      for (int j = 0; j < y0.length; ++j) {
        double sum = b[0] * yDotK[0][j];
        for (int l = 1; l < stages; ++l) {
          sum += b[l] * yDotK[l][j];
        }
        yTmp[j] = y[j] + stepSize * sum;
      }

      // estimate the error at the end of the step
      error = estimateError(yDotK, y, yTmp, stepSize);
      if (error >= 1.0) {
        // reject the step and attempt to reduce error by stepsize control
        final double factor =
            FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
        hNew = filterStep(stepSize * factor, forward, false);
      }
    }

    // local error is small enough: accept the step, trigger events and step handlers
    interpolator.storeTime(stepStart + stepSize);
    System.arraycopy(yTmp, 0, y, 0, y0.length);
    // Last stage derivative doubles as the derivative at the accepted end point.
    System.arraycopy(yDotK[stages - 1], 0, yDotTmp, 0, y0.length);
    stepStart = acceptStep(interpolator, y, yDotTmp, t);
    System.arraycopy(y, 0, yTmp, 0, y.length);

    if (!isLastStep) {
      // prepare next step
      interpolator.storeTime(stepStart);

      if (fsal) {
        // save the last evaluation for the next step
        System.arraycopy(yDotTmp, 0, yDotK[0], 0, y0.length);
      }

      // stepsize control for next step
      final double factor =
          FastMath.min(maxGrowth, FastMath.max(minReduction, safety * FastMath.pow(error, exp)));
      final double scaledH = stepSize * factor;
      final double nextT = stepStart + scaledH;
      final boolean nextIsLast = forward ? (nextT >= t) : (nextT <= t);
      hNew = filterStep(scaledH, forward, nextIsLast);

      // Re-check after filtering: if the filtered step still reaches t, land exactly on it.
      final double filteredNextT = stepStart + hNew;
      final boolean filteredNextIsLast = forward ? (filteredNextT >= t) : (filteredNextT <= t);
      if (filteredNextIsLast) {
        hNew = t - stepStart;
      }
    }
  } while (!isLastStep);

  // dispatch results
  equations.setTime(stepStart);
  equations.setCompleteState(y);

  resetInternalState();
}
/** * Simple constructor. Build a vector from its azimuthal coordinates * * @param alpha azimuth (α) around Z (0 is +X, π/2 is +Y, π is -X and 3π/2 is -Y) * @param delta elevation (δ) above (XY) plane, from -π/2 to +π/2 * @see #getAlpha() * @see #getDelta() */ public Vector3D(double alpha, double delta) { double cosDelta = FastMath.cos(delta); this.x = FastMath.cos(alpha) * cosDelta; this.y = FastMath.sin(alpha) * cosDelta; this.z = FastMath.sin(delta); }
/**
 * {@inheritDoc}
 *
 * <p>Secant-family root finder (Regula Falsi with optional Illinois/Pegasus
 * modifications, selected by the {@code method} field). Maintains a bracketing
 * interval [x0, x1] and tracks whether it is "inverted" (left bound numerically
 * larger than the right) so that side-constrained solutions ({@code allowed})
 * can be honored.
 */
protected final double doSolve() {
  // Get initial solution
  double x0 = getMin();
  double x1 = getMax();
  double f0 = computeObjectiveValue(x0);
  double f1 = computeObjectiveValue(x1);

  // If one of the bounds is the exact root, return it. Since these are
  // not under-approximations or over-approximations, we can return them
  // regardless of the allowed solutions.
  if (f0 == 0.0) {
    return x0;
  }
  if (f1 == 0.0) {
    return x1;
  }

  // Verify bracketing of initial solution.
  verifyBracketing(x0, x1);

  // Get accuracies.
  final double ftol = getFunctionValueAccuracy();
  final double atol = getAbsoluteAccuracy();
  final double rtol = getRelativeAccuracy();

  // Keep track of inverted intervals, meaning that the left bound is
  // larger than the right bound.
  boolean inverted = false;

  // Keep finding better approximations.
  while (true) {
    // Calculate the next approximation: secant intersection with the x-axis.
    final double x = x1 - ((f1 * (x1 - x0)) / (f1 - f0));
    final double fx = computeObjectiveValue(x);

    // If the new approximation is the exact root, return it. Since
    // this is not an under-approximation or an over-approximation,
    // we can return it regardless of the allowed solutions.
    if (fx == 0.0) {
      return x;
    }

    // Update the bounds with the new approximation.
    if (f1 * fx < 0) {
      // The value of x1 has switched to the other bound, thus inverting
      // the interval.
      x0 = x1;
      f0 = f1;
      inverted = !inverted;
    } else {
      // Same sign at x1 and x: the retained bound x0 did not move, so
      // scale f0 to avoid the classic Regula Falsi stagnation.
      switch (method) {
        case ILLINOIS:
          f0 *= 0.5;
          break;
        case PEGASUS:
          f0 *= f1 / (f1 + fx);
          break;
        case REGULA_FALSI:
          // Nothing.
          break;
        default:
          // Should never happen.
          throw new MathInternalError();
      }
    }
    // Update from [x0, x1] to [x0, x].
    x1 = x;
    f1 = fx;

    // If the function value of the last approximation is too small,
    // given the function value accuracy, then we can't get closer to
    // the root than we already are.
    if (FastMath.abs(f1) <= ftol) {
      // Accept x1 only if it lies on the side requested by `allowed`.
      switch (allowed) {
        case ANY_SIDE:
          return x1;
        case LEFT_SIDE:
          if (inverted) {
            return x1;
          }
          break;
        case RIGHT_SIDE:
          if (!inverted) {
            return x1;
          }
          break;
        case BELOW_SIDE:
          if (f1 <= 0) {
            return x1;
          }
          break;
        case ABOVE_SIDE:
          if (f1 >= 0) {
            return x1;
          }
          break;
        default:
          throw new MathInternalError();
      }
    }

    // If the current interval is within the given accuracies, we
    // are satisfied with the current approximation.
    if (FastMath.abs(x1 - x0) < FastMath.max(rtol * FastMath.abs(x1), atol)) {
      // Interval is tight enough: pick whichever bound satisfies the
      // requested side constraint.
      switch (allowed) {
        case ANY_SIDE:
          return x1;
        case LEFT_SIDE:
          return inverted ? x1 : x0;
        case RIGHT_SIDE:
          return inverted ? x0 : x1;
        case BELOW_SIDE:
          return (f1 <= 0) ? x1 : x0;
        case ABOVE_SIDE:
          return (f1 >= 0) ? x1 : x0;
        default:
          throw new MathInternalError();
      }
    }
  }
}