/**
 * Calculates the variance-covariance matrix of the regression parameters.
 *
 * <p>Var(b) = (X<sup>T</sup>X)<sup>-1</sup>
 *
 * <p>Instead of inverting X<sup>T</sup>X directly, the QR decomposition of X is used:
 * (X<sup>T</sup>X)<sup>-1</sup> = (R<sup>T</sup>R)<sup>-1</sup> =
 * R<sup>-1</sup>R<sup>-T</sup>, where only the leading p x p portion of R is needed
 * (p = the number of regression parameters, i.e. the length of the beta vector).
 *
 * @return The beta variance-covariance matrix
 */
@Override
protected RealMatrix calculateBetaVariance() {
  final int p = X.getColumnDimension();
  // Leading p x p corner of R from the QR decomposition of the design matrix.
  final RealMatrix rTop = qr.getR().getSubMatrix(0, p - 1, 0, p - 1);
  final RealMatrix rTopInv = new LUDecompositionImpl(rTop).getSolver().getInverse();
  // R^{-1} * R^{-T} = (R^T R)^{-1} = (X^T X)^{-1}
  return rTopInv.multiply(rTopInv.transpose());
}
/**
 * Entry point: solves a linear system built from an 11x11 matrix of transition-like
 * coefficients, then prints several derived statistics to the console.
 *
 * <p>NOTE(review): the output labels (in Russian) suggest this analyzes an algorithm's
 * control-flow graph — average processor operations per run, average file accesses,
 * bits per file transfer, operator execution counts, and average stage workload.
 * The helper methods ({@code operationsByProcess}, {@code fileMiddleRequest},
 * {@code bitsPerFileTransfer}, {@code operatorExecute}, {@code middleWork}) and the
 * {@code arr} input they read are defined elsewhere in this file — confirm their
 * contracts there.
 *
 * @param args command-line arguments (unused)
 */
public static void main(String[] args) {
  // Coefficient matrix; rows presumably hold outgoing transition probabilities
  // of some node/operator graph — TODO confirm against the model documentation.
  RealMatrix coefficients2 =
      new Array2DRowRealMatrix(
          new double[][] {
            {0.0D, 1.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.857D, 0.0D, 0.054D, 0.018D, 0.0D, 0.071D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.0D, 1.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.857D, 0.0D, 0.054D, 0.018D, 0.0D, 0.071D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 1.0D, 0.0D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 1.0D, 0.0D, 0.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 1.0D, 0.0D, 0.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.6D, 0.4D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 1.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 1.0D, 0.0D, 0.0D, 1.0D},
            {0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D, 0.0D}
          },
          false);
  // Subtract the identity: each diagonal entry becomes -1, turning the system
  // into (P - I) form before transposing — TODO confirm the intended equation.
  for (int i = 0; i < 11; i++) {
    coefficients2.setEntry(i, i, -1d);
  }
  coefficients2 = coefficients2.transpose();
  // LU-based exact solver for the (transposed) square system.
  DecompositionSolver solver = new LUDecompositionImpl(coefficients2).getSolver();
  System.out.println("1 method my Value :");
  // Right-hand side: -1 in the first component, zeros elsewhere.
  RealVector constants =
      new ArrayRealVector(new double[] {-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, false);
  RealVector solution = solver.solve(constants);
  double[] data = solution.getData();
  // RoundingMode.DOWN truncates toward zero when formatting the printed roots.
  DecimalFormat df = new DecimalFormat();
  df.setRoundingMode(RoundingMode.DOWN);
  // "Roots of the equation:"
  System.out.println("Корни уравнения:");
  for (double dd : data) {
    System.out.print(df.format(dd) + " ");
  }
  System.out.println();
  // "Average number of processor operations per run of the algorithm:"
  System.out.println(
      "Среднее число процессорных операций, выполняемых при одном прогоне алгоритма: "
          + operationsByProcess(data, arr));
  // "Average number of file accesses:" — files are numbered 1..3.
  System.out.println("Среднее число обращений к файлам:");
  for (int i = 1; i < 4; i++) {
    System.out.println(" Файл " + i + " : " + fileMiddleRequest(data, arr, i));
  }
  // "Average amount of information transferred per file access:"
  System.out.println("Среднее количество информации передаваемой при одном обращении к файлам:");
  for (int i = 1; i < 4; i++) {
    System.out.println(" Файл " + i + " : " + bitsPerFileTransfer(data, arr, i));
  }
  // "Sum of the average number of accesses to the main operators:"
  System.out.println(
      "Сумма среднего числа обращений к основным операторам: " + operatorExecute(data, arr));
  // "Average workload of the stage:"
  System.out.println("Средняя трудоемкость этапа: " + middleWork(data, arr));
}
/** * Compute the "hat" matrix. * * <p>The hat matrix is defined in terms of the design matrix X by * X(X<sup>T</sup>X)<sup>-1</sup>X<sup>T</sup> * * <p>The implementation here uses the QR decomposition to compute the hat matrix as Q * I<sub>p</sub>Q<sup>T</sup> where I<sub>p</sub> is the p-dimensional identity matrix augmented * by 0's. This computational formula is from "The Hat Matrix in Regression and ANOVA", David C. * Hoaglin and Roy E. Welsch, <i>The American Statistician</i>, Vol. 32, No. 1 (Feb., 1978), pp. * 17-22. * * @return the hat matrix */ public RealMatrix calculateHat() { // Create augmented identity matrix RealMatrix Q = qr.getQ(); final int p = qr.getR().getColumnDimension(); final int n = Q.getColumnDimension(); Array2DRowRealMatrix augI = new Array2DRowRealMatrix(n, n); double[][] augIData = augI.getDataRef(); for (int i = 0; i < n; i++) { for (int j = 0; j < n; j++) { if (i == j && i < p) { augIData[i][j] = 1d; } else { augIData[i][j] = 0d; } } } // Compute and return Hat matrix return Q.multiply(augI).multiply(Q.transpose()); }
/**
 * Modifies this map through a single backpropagation iteration using the given error values on
 * the output nodes.
 *
 * <p>NOTE(review): statement order is load-bearing here — both weight deltas are computed from
 * the pre-update weights before either weight matrix is modified.
 *
 * @param error error value per output node; must have one entry per output unit —
 *     TODO confirm expected length against the network's output dimension
 * @param learningRate step size applied (negated) to both weight-delta matrices
 */
public void train(List<Double> error, double learningRate) {
  // Copy the boxed error list into a dense vector.
  RealVector eOut = new ArrayRealVector(error.size());
  for (int i : series(error.size())) eOut.setEntry(i, error.get(i));
  // * gHidden: delta for the non-bias nodes of the hidden layer
  // Load the first n hidden activations, then overwrite each entry in place with the
  // activation derivative. NOTE(review): entries beyond index n-1 (bias slots, if any)
  // keep whatever stale value gHidden held from the previous call before the derivative
  // is applied — confirm gHidden's dimension equals n or that the extra entries are unused.
  gHidden.setSubVector(0, stateHidden.getSubVector(0, n)); // optimize
  for (int i : Series.series(gHidden.getDimension()))
      gHidden.setEntry(i, activation.derivative(gHidden.getEntry(i)));
  // Backpropagate the output error through the (pre-update) output weights.
  eHiddenL = weights1.transpose().operate(eOut);
  // Keep the first h components and scale element-wise by the activation derivative.
  eHidden.setSubVector(0, eHiddenL.getSubVector(0, h));
  for (int i : series(h)) eHidden.setEntry(i, eHidden.getEntry(i) * gHidden.getEntry(i));
  // Weight deltas: negative-gradient outer products scaled by the learning rate.
  weights1Delta = MatrixTools.outer(eOut, stateHidden);
  weights1Delta = weights1Delta.scalarMultiply(-1.0 * learningRate); // optimize
  weights0Delta = MatrixTools.outer(eHidden, stateIn);
  weights0Delta = weights0Delta.scalarMultiply(-1.0 * learningRate);
  // Apply both deltas only after both have been computed.
  weights0 = weights0.add(weights0Delta);
  weights1 = weights1.add(weights1Delta);
}