Code example #1
File: GLRMModel.java Project: vijaykiran/h2o-3
 public final double loss(double u, double a, Loss loss) {
   assert loss.isForNumeric() : "Loss function " + loss + " not applicable to numerics";
   switch (loss) {
     case Quadratic:
       return (u - a) * (u - a);
     case Absolute:
       return Math.abs(u - a);
     case Huber:
       return Math.abs(u - a) <= 1 ? 0.5 * (u - a) * (u - a) : Math.abs(u - a) - 0.5;
     case Poisson:
       assert a >= 0 : "Poisson loss L(u,a) requires variable a >= 0";
       return Math.exp(u)
           + (a == 0 ? 0 : -a * u + a * Math.log(a) - a); // Since \lim_{a->0} a*log(a) = 0
     case Hinge:
       // return Math.max(1-a*u,0);
       return Math.max(1 - (a == 0 ? -u : u), 0); // Booleans are coded {0,1} instead of {-1,1}
     case Logistic:
       // return Math.log(1 + Math.exp(-a * u));
       return Math.log(
           1 + Math.exp(a == 0 ? u : -u)); // Booleans are coded {0,1} instead of {-1,1}
     case Periodic:
       return 1 - Math.cos((a - u) * (2 * Math.PI) / _period);
     default:
       throw new RuntimeException("Unknown loss function " + loss);
   }
 }
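A minimal standalone sketch (not project code; the class name LossDemo and the sample values are invented here) that evaluates the Quadratic and Huber formulas above, illustrating how Huber switches from quadratic to linear once |u - a| exceeds 1:

public class LossDemo {
  public static void main(String[] args) {
    double a = 2.0; // observed value
    for (double u : new double[] {1.5, 2.5, 5.0}) {
      double quad = (u - a) * (u - a); // Quadratic loss
      double huber = Math.abs(u - a) <= 1
          ? 0.5 * (u - a) * (u - a)    // quadratic near the target
          : Math.abs(u - a) - 0.5;     // linear in the tails
      System.out.printf("u=%.1f quadratic=%.2f huber=%.2f%n", u, quad, huber);
    }
  }
}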
Code example #2
File: GLRMModel.java Project: vijaykiran/h2o-3
    public static double mloss(double[] u, int a, Loss multi_loss) {
      assert multi_loss.isForCategorical()
          : "Loss function " + multi_loss + " not applicable to categoricals";
      if (a < 0 || a > u.length - 1)
        throw new IllegalArgumentException(
            "Index must be between 0 and " + (u.length - 1));

      double sum = 0;
      switch (multi_loss) {
        case Categorical:
          for (int i = 0; i < u.length; i++) sum += Math.max(1 + u[i], 0);
          sum += Math.max(1 - u[a], 0) - Math.max(1 + u[a], 0);
          return sum;
        case Ordinal:
          for (int i = 0; i < u.length - 1; i++) sum += Math.max(a > i ? 1 - u[i] : 1, 0);
          return sum;
        default:
          throw new RuntimeException("Unknown multidimensional loss function " + multi_loss);
      }
    }
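For illustration, a minimal standalone sketch (hypothetical class MultiLossDemo; the score vector and true level are made up) applying the same Categorical hinge algebra as above, which penalizes every level and then corrects the term for the true level a:

public class MultiLossDemo {
  public static void main(String[] args) {
    double[] u = {0.4, -1.2, 0.9}; // one score per category level
    int a = 2;                     // index of the true level
    double sum = 0;
    for (int i = 0; i < u.length; i++) sum += Math.max(1 + u[i], 0); // penalize every level
    sum += Math.max(1 - u[a], 0) - Math.max(1 + u[a], 0);            // correct the true level's term
    System.out.println("Categorical loss = " + sum);
  }
}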
Code example #3
File: DRF.java Project: rohit2412/h2o
 @SuppressWarnings("unused")
 @Override
 protected void init() {
   super.init();
   // Initialize local variables
    // Default mtry: sqrt(#cols) for classification, #cols/3 for regression
    _mtry =
        (mtries == -1)
            ? (classification ? Math.max((int) Math.sqrt(_ncols), 1) : Math.max(_ncols / 3, 1))
            : mtries;
    if (!(1 <= _mtry && _mtry <= _ncols))
      throw new IllegalArgumentException(
          "Computed mtry should be in interval [1,#cols] but it is " + _mtry);
    if (!(0.0 < sample_rate && sample_rate <= 1.0))
      throw new IllegalArgumentException(
          "Sample rate should be in interval (0,1] but it is " + sample_rate);
    if (DEBUG_DETERMINISTIC && seed == -1) _seed = 0x1321e74a0192470cL; // fixed seed for reproducible debug runs
    else if (seed == -1) _seed = _seedGenerator.nextLong();
    else _seed = seed;
    if (sample_rate == 1f && validation != null)
      Log.warn(
          Sys.DRF__,
          "Sample rate is 100% and a validation dataset is supplied. There is no OOB data to perform validation!");
 }
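The core of this init() is the default mtry rule. A minimal sketch under that assumption (defaultMtry is a hypothetical helper, not part of DRF.java):

public class MtryDemo {
  // Mirrors the default above: sqrt(#cols) for classification, #cols/3 for regression
  static int defaultMtry(int ncols, boolean classification) {
    return classification
        ? Math.max((int) Math.sqrt(ncols), 1)
        : Math.max(ncols / 3, 1);
  }

  public static void main(String[] args) {
    System.out.println(defaultMtry(100, true));  // 10
    System.out.println(defaultMtry(100, false)); // 33
  }
}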
Code example #4
File: GBM.java Project: shjgiser/h2o
 @Override
 protected float[] score0(double[] data, float[] preds) {
   float[] p = super.score0(data, preds);
   if (nclasses() > 1) { // classification
     // Because we call Math.exp, we have to be numerically stable or else
     // we get Infinities, and then shortly NaN's.  Rescale the data so the
     // largest value is +/-1 and the other values are smaller.
     // See notes here:  http://www.hongliangjie.com/2011/01/07/logsum/
     float maxval = Float.NEGATIVE_INFINITY;
     float dsum = 0;
     if (nclasses() == 2) p[2] = -p[1];
     // Find a max
     for (int k = 1; k < p.length; k++) maxval = Math.max(maxval, p[k]);
     assert !Float.isInfinite(maxval)
         : "Something is wrong with GBM trees since returned prediction is "
             + Arrays.toString(p);
     for (int k = 1; k < p.length; k++) dsum += (p[k] = (float) Math.exp(p[k] - maxval));
     div(p, dsum);
     p[0] = getPrediction(p, data);
    } // else regression: leave the prediction from super.score0 unchanged
   return p;
 }
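To see why the rescaling matters, a minimal standalone sketch (invented scores; the project's div(p, dsum) helper is replaced by an explicit loop): without subtracting maxval, (float) Math.exp(400) already overflows to Infinity, while the shifted version yields finite probabilities:

public class SoftmaxDemo {
  public static void main(String[] args) {
    float[] p = {0f, 400f, 402f}; // index 0 is reserved for the predicted class
    float maxval = Float.NEGATIVE_INFINITY;
    for (int k = 1; k < p.length; k++) maxval = Math.max(maxval, p[k]);
    float dsum = 0;
    for (int k = 1; k < p.length; k++) dsum += (p[k] = (float) Math.exp(p[k] - maxval));
    for (int k = 1; k < p.length; k++) p[k] /= dsum; // same effect as div(p, dsum)
    System.out.println(p[1] + " " + p[2]); // ~0.119 and ~0.881
  }
}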
Code example #5
File: KMeans.java Project: huamichaelchen/h2o-3
  // Draws a uniformly random row index and copies that row into 'center'
  private void randomRow(
      Vec[] vecs, Random rand, double[] center, double[] means, double[] mults, int[] modes) {
   long row = Math.max(0, (long) (rand.nextDouble() * vecs[0].length()) - 1);
   data(center, vecs, row, means, mults, modes);
 }
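A minimal standalone sketch of the same index draw (RandomRowDemo is hypothetical; numRows stands in for vecs[0].length()), showing how the result is clamped into [0, numRows - 1]:

import java.util.Random;

public class RandomRowDemo {
  public static void main(String[] args) {
    Random rand = new Random(42);
    long numRows = 1000L; // stands in for vecs[0].length()
    long row = Math.max(0, (long) (rand.nextDouble() * numRows) - 1);
    System.out.println("row = " + row); // always in [0, numRows - 1]
  }
}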
Code example #6
File: GLRMModel.java Project: vijaykiran/h2o-3
    // public final double[] rproxgrad_x(double[] u, double alpha) { return rproxgrad(u, alpha,
    // _gamma_x, _regularization_x, RandomUtils.getRNG(_seed)); }
    // public final double[] rproxgrad_y(double[] u, double alpha) { return rproxgrad(u, alpha,
    // _gamma_y, _regularization_y, RandomUtils.getRNG(_seed)); }
    static double[] rproxgrad(
        double[] u, double alpha, double gamma, Regularizer regularization, Random rand) {
      if (u == null || alpha == 0 || gamma == 0) return u;
      double[] v = new double[u.length];

      switch (regularization) {
        case None:
          return u;
        case Quadratic:
          for (int i = 0; i < u.length; i++) v[i] = u[i] / (1 + 2 * alpha * gamma);
          return v;
        case L2:
          // Proof uses Moreau decomposition; see section 6.5.1 of Parikh and Boyd
          // https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
          double weight = 1 - alpha * gamma / ArrayUtils.l2norm(u);
          if (weight < 0) return v; // Zero vector
          for (int i = 0; i < u.length; i++) v[i] = weight * u[i];
          return v;
        case L1:
          for (int i = 0; i < u.length; i++)
            v[i] = Math.max(u[i] - alpha * gamma, 0) + Math.min(u[i] + alpha * gamma, 0);
          return v;
        case NonNegative:
          for (int i = 0; i < u.length; i++) v[i] = Math.max(u[i], 0);
          return v;
        case OneSparse:
          int idx = ArrayUtils.maxIndex(u, rand);
          v[idx] = u[idx] > 0 ? u[idx] : 1e-6;
          return v;
        case UnitOneSparse:
          idx = ArrayUtils.maxIndex(u, rand);
          v[idx] = 1;
          return v;
        case Simplex:
          // Proximal gradient algorithm by Chen and Ye in http://arxiv.org/pdf/1101.6081v2.pdf
          // 1) Sort input vector u in ascending order: u[1] <= ... <= u[n]
          int n = u.length;
          int[] idxs = new int[n];
          for (int i = 0; i < n; i++) idxs[i] = i;
          ArrayUtils.sort(idxs, u);

          // 2) Calculate cumulative sum of u in descending order
          // cumsum(u) = (..., u[n-2]+u[n-1]+u[n], u[n-1]+u[n], u[n])
          double[] ucsum = new double[n];
          ucsum[n - 1] = u[idxs[n - 1]];
          for (int i = n - 2; i >= 0; i--) ucsum[i] = ucsum[i + 1] + u[idxs[i]];

          // 3) Let t_i = (\sum_{j=i+1}^n u[j] - 1)/(n - i)
          // For i = n-1,...,1, set optimal t* to first t_i >= u[i]
          double t = (ucsum[0] - 1) / n; // Default t* = (\sum_{j=1}^n u[j] - 1)/n
          for (int i = n - 1; i >= 1; i--) {
            double tmp = (ucsum[i] - 1) / (n - i);
            if (tmp >= u[idxs[i - 1]]) {
              t = tmp;
              break;
            }
          }

          // 4) Return max(u - t*, 0) as projection of u onto simplex
          double[] x = new double[u.length];
          for (int i = 0; i < u.length; i++) x[i] = Math.max(u[i] - t, 0);
          return x;
        default:
          throw new RuntimeException("Unknown regularization function " + regularization);
      }
    }
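As one concrete case, a minimal standalone sketch of the L1 branch (soft-thresholding); thresh plays the role of alpha * gamma and the input vector is invented:

public class ProxDemo {
  public static void main(String[] args) {
    double[] u = {0.8, -0.3, 0.05};
    double thresh = 0.1; // plays the role of alpha * gamma
    double[] v = new double[u.length];
    // Shrink each entry toward zero by thresh; entries with |u[i]| <= thresh become 0
    for (int i = 0; i < u.length; i++)
      v[i] = Math.max(u[i] - thresh, 0) + Math.min(u[i] + thresh, 0);
    System.out.println(java.util.Arrays.toString(v)); // ~[0.7, -0.2, 0.0]
  }
}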