// FIXME: Use weights
double initial_MSE(Vec train, Vec test) {
  if (train.isEnum()) {
    // Guess the most populous class; call the fraction of those Q.
    // Then Q of them are "mostly correct" - error is (1-Q) per element.
    // The remaining 1-Q elements are "mostly wrong" - error is Q (our guess,
    // which is wrong).
    int cls = ArrayUtils.maxIndex(train.bins());
    // Cast to double so the long bin counts don't truncate under integer division
    double guess  = (double) train.bins()[cls] / (train.length() - train.naCnt());
    double actual = (double) test.bins()[cls]  / (test.length()  - test.naCnt());
    return guess * guess + actual - 2.0 * actual * guess; // Q^2 + A - 2*A*Q
  } else { // Regression
    // Guessing the training data mean, but actual is validation set mean
    double stddev = test.sigma();
    double bias = train.mean() - test.mean();
    return stddev * stddev + bias * bias;
  }
}
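// A minimal standalone sketch (hypothetical, not part of the original source) checking
// the closed form above by Monte Carlo: if we always predict probability Q = guess for
// class cls, and the validation indicator I for that class is 1 with frequency A = actual,
// then MSE = E[(Q - I)^2] = Q^2 - 2*Q*E[I] + E[I^2] = Q^2 + A - 2*A*Q, since I^2 = I.
// Only java.util.Random is assumed; the method name is made up for illustration.
static void checkInitialMseFormula() {
  java.util.Random r = new java.util.Random(42);
  double q = 0.7, a = 0.55;  // Q = training-mode frequency, A = validation frequency
  long trials = 1_000_000;
  double sse = 0;
  for (long i = 0; i < trials; i++) {
    double ind = r.nextDouble() < a ? 1.0 : 0.0; // validation indicator for class cls
    sse += (q - ind) * (q - ind);
  }
  double mc = sse / trials;                  // Monte Carlo estimate of E[(Q - I)^2]
  double closed = q * q + a - 2.0 * a * q;   // closed form returned by initial_MSE
  assert Math.abs(mc - closed) < 1e-2 : mc + " vs " + closed;
}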
// Pick the most common cat level for each of the cluster centers' cat columns
private static double[][] max_cats(double[][] centers, long[][][] cats, String[][] isCats) {
  for (int clu = 0; clu < centers.length; clu++)
    for (int col = 0; col < centers[0].length; col++)
      if (isCats[col] != null) // Categorical column: use the modal level's index as the center value
        centers[clu][col] = ArrayUtils.maxIndex(cats[clu][col]);
  return centers;
}
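// A hypothetical usage sketch (names and counts are made up for illustration): for two
// clusters over one categorical column with a three-level domain, max_cats replaces each
// center's value for that column with the index of the cluster's modal level.
static void demoMaxCats() {
  double[][] centers = {{0.0}, {0.0}};
  // cats[cluster][column][level] = count of cluster rows observed at that level
  long[][][] cats = {
      {{5, 9, 2}},   // cluster 0: level 1 ("B") is most common
      {{7, 1, 3}}    // cluster 1: level 0 ("A") is most common
  };
  String[][] isCats = {{"A", "B", "C"}}; // non-null domain marks the column as categorical
  double[][] out = max_cats(centers, cats, isCats);
  assert out[0][0] == 1 && out[1][0] == 0;
}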
// public final double[] rproxgrad_x(double[] u, double alpha) { return rproxgrad(u, alpha,
//     _gamma_x, _regularization_x, RandomUtils.getRNG(_seed)); }
// public final double[] rproxgrad_y(double[] u, double alpha) { return rproxgrad(u, alpha,
//     _gamma_y, _regularization_y, RandomUtils.getRNG(_seed)); }
static double[] rproxgrad(double[] u, double alpha, double gamma, Regularizer regularization, Random rand) {
  if (u == null || alpha == 0 || gamma == 0) return u;
  double[] v = new double[u.length];

  switch (regularization) {
    case None:
      return u;
    case Quadratic:
      for (int i = 0; i < u.length; i++) v[i] = u[i] / (1 + 2 * alpha * gamma);
      return v;
    case L2:
      // Proof uses Moreau decomposition; see section 6.5.1 of Parikh and Boyd
      // https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
      double weight = 1 - alpha * gamma / ArrayUtils.l2norm(u);
      if (weight < 0) return v; // Zero vector
      for (int i = 0; i < u.length; i++) v[i] = weight * u[i];
      return v;
    case L1:
      // Soft-thresholding: shrink each coordinate toward zero by alpha*gamma
      for (int i = 0; i < u.length; i++)
        v[i] = Math.max(u[i] - alpha * gamma, 0) + Math.min(u[i] + alpha * gamma, 0);
      return v;
    case NonNegative:
      for (int i = 0; i < u.length; i++) v[i] = Math.max(u[i], 0);
      return v;
    case OneSparse:
      int idx = ArrayUtils.maxIndex(u, rand);
      v[idx] = u[idx] > 0 ? u[idx] : 1e-6;
      return v;
    case UnitOneSparse:
      idx = ArrayUtils.maxIndex(u, rand);
      v[idx] = 1;
      return v;
    case Simplex:
      // Proximal gradient algorithm by Chen and Ye in http://arxiv.org/pdf/1101.6081v2.pdf
      // 1) Sort input vector u in ascending order: u[1] <= ... <= u[n]
      int n = u.length;
      int[] idxs = new int[n];
      for (int i = 0; i < n; i++) idxs[i] = i;
      ArrayUtils.sort(idxs, u);

      // 2) Calculate cumulative sum of u in descending order
      //    cumsum(u) = (..., u[n-2]+u[n-1]+u[n], u[n-1]+u[n], u[n])
      double[] ucsum = new double[n];
      ucsum[n - 1] = u[idxs[n - 1]];
      for (int i = n - 2; i >= 0; i--)
        ucsum[i] = ucsum[i + 1] + u[idxs[i]];

      // 3) Let t_i = (\sum_{j=i+1}^n u[j] - 1)/(n - i)
      //    For i = n-1,...,1, set optimal t* to first t_i >= u[i]
      double t = (ucsum[0] - 1) / n; // Default t* = (\sum_{j=1}^n u[j] - 1)/n
      for (int i = n - 1; i >= 1; i--) {
        double tmp = (ucsum[i] - 1) / (n - i);
        if (tmp >= u[idxs[i - 1]]) {
          t = tmp;
          break;
        }
      }

      // 4) Return max(u - t*, 0) as projection of u onto simplex
      double[] x = new double[u.length];
      for (int i = 0; i < u.length; i++)
        x[i] = Math.max(u[i] - t, 0);
      return x;
    default:
      throw new RuntimeException("Unknown regularization function " + regularization);
  }
}
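// A minimal usage sketch (hypothetical test code, not in the original source), assuming
// the Regularizer enum from this package: L1 soft-thresholds each coordinate by
// alpha*gamma, and Simplex returns a nonnegative vector summing to 1 regardless of gamma.
static void demoRproxgrad() {
  java.util.Random rand = new java.util.Random(1234);
  double[] u = {0.5, -0.2, 1.3};

  // L1 with alpha*gamma = 0.3: expect {0.2, 0.0, 1.0}
  double[] l1 = rproxgrad(u, 0.1, 3.0, Regularizer.L1, rand);
  assert Math.abs(l1[0] - 0.2) < 1e-12 && l1[1] == 0.0 && Math.abs(l1[2] - 1.0) < 1e-12;

  // Simplex: projection onto {x : x >= 0, sum(x) = 1}
  double[] sx = rproxgrad(u, 0.1, 3.0, Regularizer.Simplex, rand);
  double sum = 0;
  for (double xi : sx) { assert xi >= 0 : "negative entry"; sum += xi; }
  assert Math.abs(sum - 1.0) < 1e-12 : "sum = " + sum;
}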