/**
 * Performs one stochastic gradient-descent step of skip-gram training with
 * negative sampling for a single (center word, context set) pair.
 *
 * For each positive context word, and for each sampled negative context, the
 * corresponding output vector (column of W2) is updated first, then the error
 * is backpropagated to update the center word's input vector (row of W1).
 *
 * @param wordPlusContexts pair of (center word index, set of context word indices)
 * @param s                step counter fed to learningRateDecay to get eta
 */
private void stochasticUpdateStep(Pair<Integer, Set<Integer>> wordPlusContexts, int s) {
    double eta = learningRateDecay(s); // learning rate for this step
    int wordIndex = wordPlusContexts.getFirst(); // actual center word
    // Set h vector equal to the kth row of weight matrix W1. h = x' * W = W[k,:] = v(input)
    RealVector h = W1.getRowVector(wordIndex); // 1xN row vector
    for (int contextWordIndex : wordPlusContexts.getSecond()) {
        // Draw k negative contexts for this center word; the sampler choice
        // is a configuration toggle (unigram noise distribution vs. default).
        Set<Integer> negativeContexts;
        if (sampleUnigram) {
            negativeContexts = negativeSampleContexts(wordIndex, noiseSampler);
        } else {
            negativeContexts = negativeSampleContexts(wordIndex);
        }
        // wordIndex is the input word
        // negativeContexts is the k negative contexts
        // contextWordIndex is 1 positive context
        // First update the output vectors for 1 positive context
        RealVector vPrime_j = W2.getColumnVector(contextWordIndex); // Nx1 column vector
        double u = h.dotProduct(vPrime_j); // u_j = vPrime(output) * v(input)
        double t_j = 1.0; // t_j := 1{j == contextWordIndex}
        double scale = sigmoid(u) - t_j; // prediction error for a positive example
        scale = eta * scale;
        RealVector gradientOut2Hidden = h.mapMultiply(scale);
        vPrime_j = vPrime_j.subtract(gradientOut2Hidden); // gradient step on the output vector
        W2.setColumnVector(contextWordIndex, vPrime_j);
        // Next backpropagate the error to the hidden layer and update the input vectors
        // NOTE(review): u is recomputed here with the ALREADY-UPDATED vPrime_j;
        // the classic word2vec derivation uses the pre-update output vector for
        // the input-side gradient — confirm this ordering is intentional.
        RealVector v_I = h;
        u = h.dotProduct(vPrime_j);
        scale = sigmoid(u) - t_j;
        scale = eta * scale;
        RealVector gradientHidden2In = vPrime_j.mapMultiply(scale);
        v_I = v_I.subtract(gradientHidden2In);
        h = v_I; // carry the updated input vector into the remaining updates
        W1.setRowVector(wordIndex, v_I);
        // Repeat update process for k negative contexts
        t_j = 0.0; // t_j := 1{j == contextWordIndex}
        for (int negContext : negativeContexts) {
            vPrime_j = W2.getColumnVector(negContext);
            u = h.dotProduct(vPrime_j);
            scale = sigmoid(u) - t_j; // for negatives the target is 0, so error = sigmoid(u)
            scale = eta * scale;
            gradientOut2Hidden = h.mapMultiply(scale);
            vPrime_j = vPrime_j.subtract(gradientOut2Hidden);
            W2.setColumnVector(negContext, vPrime_j);
            // Backpropagate the error to the hidden layer and update the input vectors
            // (same post-update recomputation of u as above — see NOTE(review)).
            v_I = h;
            u = h.dotProduct(vPrime_j);
            scale = sigmoid(u) - t_j;
            scale = eta * scale;
            gradientHidden2In = vPrime_j.mapMultiply(scale);
            v_I = v_I.subtract(gradientHidden2In);
            h = v_I;
            W1.setRowVector(wordIndex, v_I);
        }
    }
}
public static double Mahalanobis(double[] data, WormGene g) { // Compute Mahalanobis distance from worm gene data assert data.length == g.dim; int dim = g.dim; Vector<double[]> eigvec = DataConverter.mat2Jvector(g.eigenvectors); double[] eigval = DataConverter.colMat2Double(g.eigenvalues); for (int i = 0; i < dim; i++) eigval[i] += Configs.MDIS_EPSILON; for (int i = 0; i < dim; i++) assert eigval[i] != 0; RealVector v1 = MatrixUtils.createRealVector(data); RealVector[] vec = new RealVector[dim]; for (int i = 0; i < dim; i++) { vec[i] = MatrixUtils.createRealVector(eigvec.elementAt(i)); } double[] d = new double[dim]; for (int i = 0; i < dim; i++) { d[i] = v1.dotProduct(vec[i]) / Math.sqrt(eigval[i]); } RealVector dis = MatrixUtils.createRealVector(d); return vectorLength(dis); }
public static double Mahalanobis(double[] data, Vector<double[]> eigvec, double[] eigval) { // Compute Mahalanobis distance from double[]s assert data.length == eigvec.elementAt(0).length && data.length == eigval.length; int dim = eigvec.elementAt(0).length; for (int i = 0; i < dim; i++) eigval[i] += Configs.MDIS_EPSILON; for (int i = 0; i < dim; i++) assert eigval[i] != 0; RealVector v1 = MatrixUtils.createRealVector(data); RealVector[] vec = new RealVector[dim]; for (int i = 0; i < dim; i++) { vec[i] = MatrixUtils.createRealVector(eigvec.elementAt(i)); } double[] d = new double[dim]; for (int i = 0; i < dim; i++) { d[i] = v1.dotProduct(vec[i]) / Math.sqrt(eigval[i]); } RealVector dis = MatrixUtils.createRealVector(d); return vectorLength(dis); }
/**
 * Logistic sigmoid applied to the inner product of two vectors.
 *
 * @param x first vector
 * @param y second vector, same dimension as x
 * @return sigmoid(x . y)
 */
private static double sigmoid(RealVector x, RealVector y) {
    return sigmoid(x.dotProduct(y));
}
/**
 * Recomputes the residual sum of squares (RSS) as the squared Euclidean
 * norm of the residual vector, storing the result in the rss field.
 */
public void estimateRSS() {
    this.rss = this.resid.dotProduct(this.resid);
}