Example No. 1
 public Submodel(
     double lambda,
     double[] beta,
     double[] norm_beta,
     long run_time,
     int iteration,
     boolean sparseCoef) {
   this.lambda_value = lambda;
   this.run_time = run_time;
   this.iteration = iteration;
   int r = 0;
   if (beta != null) {
     final double[] b = norm_beta != null ? norm_beta : beta;
      // count the non-zero coefficients (the submodel's rank)
     for (double d : beta) if (d != 0) ++r;
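      // allocate the index array: non-zeros only when sparseCoef is set, otherwise every column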
     idxs = MemoryManager.malloc4(sparseCoef ? r : beta.length);
     int j = 0;
     for (int i = 0; i < beta.length; ++i) if (!sparseCoef || beta[i] != 0) idxs[j++] = i;
     j = 0;
     this.beta = MemoryManager.malloc8d(idxs.length);
     for (int i : idxs) this.beta[j++] = beta[i];
     if (norm_beta != null) {
       j = 0;
       this.norm_beta = MemoryManager.malloc8d(idxs.length);
       for (int i : idxs) this.norm_beta[j++] = norm_beta[i];
     }
   } else idxs = null;
   rank = r;
   this.sparseCoef = sparseCoef;
 }
Example No. 2
 public Row(boolean sparse, int nNums, int nBins, int nresponses, double etaOffset) {
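    // binIds holds categorical bin ids and numVals the numeric values; sparse rows also record column ids in numIds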
   binIds = MemoryManager.malloc4(nBins);
   numVals = MemoryManager.malloc8d(nNums);
   response = MemoryManager.malloc8d(nresponses);
   if (sparse) numIds = MemoryManager.malloc4(nNums);
   this.etaOffset = etaOffset;
   this.nNums = sparse ? 0 : nNums;
 }
Example No. 3
 public Row(boolean sparse, int nNums, int nBins, int nresponses, int i, long start) {
   binIds = MemoryManager.malloc4(nBins);
   numVals = MemoryManager.malloc8d(nNums);
   response = MemoryManager.malloc8d(nresponses);
   if (sparse) numIds = MemoryManager.malloc4(nNums);
   this.nNums = sparse ? 0 : nNums;
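    // cid is the row's index within the chunk; rid is its absolute row id (chunk start + offset)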
   cid = i;
   rid = start + i;
 }
Example No. 4
 public void setResponseTransform(TransformType t) {
   _response_transform = t;
   if (t == TransformType.NONE) {
     _normRespMul = null;
     _normRespSub = null;
   } else {
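      // one normalization multiplier/offset pair per response column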
     _normRespMul = MemoryManager.malloc8d(_responses);
     _normRespSub = MemoryManager.malloc8d(_responses);
     setTransform(t, _normRespMul, _normRespSub, _adaptedFrame.numCols() - _responses, _responses);
   }
 }
Example No. 5
 public void setPredictorTransform(TransformType t) {
   _predictor_transform = t;
   if (t == TransformType.NONE) {
     _normMul = null;
     _normSub = null;
   } else {
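      // one scale/shift pair per numeric predictor; categorical columns are left untransformed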
     _normMul = MemoryManager.malloc8d(_nums);
     _normSub = MemoryManager.malloc8d(_nums);
     setTransform(t, _normMul, _normSub, _cats, _nums);
   }
 }
Example No. 6
 private double[] scoreRow(Row r, double o, double[] preds) {
   if (_parms._family == Family.multinomial) {
     if (_eta == null) _eta = new ThreadLocal<>();
     double[] eta = _eta.get();
     if (eta == null) _eta.set(eta = MemoryManager.malloc8d(_output.nclasses()));
     final double[][] bm = _output._global_beta_multinomial;
     double sumExp = 0;
     double maxRow = 0;
     for (int c = 0; c < bm.length; ++c) {
       eta[c] = r.innerProduct(bm[c]) + o;
       if (eta[c] > maxRow) maxRow = eta[c];
     }
      for (int c = 0; c < bm.length; ++c) sumExp += eta[c] = Math.exp(eta[c] - maxRow); // softmax numerator, shifted by maxRow for numerical stability
     sumExp = 1.0 / sumExp;
     for (int c = 0; c < bm.length; ++c) preds[c + 1] = eta[c] * sumExp;
     preds[0] = ArrayUtils.maxIndex(eta);
   } else {
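      // single linear predictor: apply the inverse link to obtain the mean response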
     double mu = _parms.linkInv(r.innerProduct(beta()) + o);
     if (_parms._family == Family.binomial) { // threshold for prediction
       preds[0] = mu >= defaultThreshold() ? 1 : 0;
       preds[1] = 1.0 - mu; // class 0
       preds[2] = mu; // class 1
     } else preds[0] = mu;
   }
   return preds;
 }
Example No. 7
 protected void cancel_sparse() {
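    // expand the sparse representation back to a dense one of length _len, then drop the index array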
   if (sparseLen() != _len) {
     if (_is != null) {
       int[] is = MemoryManager.malloc4(_len);
       for (int i = 0; i < _len; i++) is[i] = -1;
       for (int i = 0; i < sparseLen(); i++) is[_id[i]] = _is[i];
       _is = is;
     } else if (_ds == null) {
       int[] xs = MemoryManager.malloc4(_len);
       long[] ls = MemoryManager.malloc8(_len);
       for (int i = 0; i < sparseLen(); ++i) {
         xs[_id[i]] = _xs[i];
         ls[_id[i]] = _ls[i];
       }
       _xs = xs;
       _ls = ls;
     } else {
       double[] ds = MemoryManager.malloc8d(_len);
       for (int i = 0; i < sparseLen(); ++i) ds[_id[i]] = _ds[i];
       _ds = ds;
     }
     set_sparseLen(_len);
   }
   _id = null;
 }
Example No. 8
 public ModelDataAdaptor(OldModel M, int yCol, int[] cols, int[][] catMap) {
   this.M = M;
   _yCol = yCol;
   _row = MemoryManager.malloc8d(cols.length);
   _xCols = cols;
   _catMap = catMap;
 }
Example No. 9
 private final double[] contractVec(double[] beta, final int[] activeCols) {
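    // gather only the active coefficients; the intercept stays in the last slot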
   double[] res = MemoryManager.malloc8d(activeCols.length + 1);
   int i = 0;
   for (int c : activeCols) res[i++] = beta[c];
   res[res.length - 1] = beta[beta.length - 1];
   return res;
 }
Example No. 10
 private double[] setNewBeta(final double[] newBeta) {
   final double[] fullBeta;
   if (_activeCols != null) {
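      // scatter the compact active-column coefficients into a full-width beta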
     fullBeta = MemoryManager.malloc8d(_dinfo.fullN() + 1);
     int j = 0;
     for (int i : _activeCols) fullBeta[i] = newBeta[j++];
     assert j == newBeta.length - 1;
     fullBeta[fullBeta.length - 1] = newBeta[j];
   } else {
     assert newBeta.length == _dinfo.fullN() + 1;
     fullBeta = newBeta;
   }
   final double[] newBetaDeNorm;
   if (_dinfo._standardize) {
     newBetaDeNorm = fullBeta.clone();
     double norm = 0.0; // Reverse any normalization on the intercept
     // denormalize only the numeric coefs (categoricals are not normalized)
     final int numoff = _dinfo.numStart();
     for (int i = numoff; i < fullBeta.length - 1; i++) {
       double b = newBetaDeNorm[i] * _dinfo._normMul[i - numoff];
       norm += b * _dinfo._normSub[i - numoff]; // Also accumulate the intercept adjustment
       newBetaDeNorm[i] = b;
     }
     newBetaDeNorm[newBetaDeNorm.length - 1] -= norm;
   } else newBetaDeNorm = null;
   _model.setLambdaSubmodel(
       _lambdaIdx,
       newBetaDeNorm == null ? fullBeta : newBetaDeNorm,
       newBetaDeNorm == null ? null : fullBeta,
       (_iter + 1));
   _model.clone().update(self());
   return fullBeta;
 }
Example No. 11
 void addNullSubmodel(double lmax, double icept, GLMValidation val) {
   assert _submodels == null;
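    // the null model has all coefficients at zero except the intercept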
   double[] beta = MemoryManager.malloc8d(_coefficient_names.length);
   beta[beta.length - 1] = icept;
   _submodels =
       new Submodel[] {new Submodel(lmax, beta, beta, 0, 0, _coefficient_names.length > 750)};
   _submodels[0].validation = val;
 }
Example No. 12
 private final double[] expandVec(double[] beta, final int[] activeCols) {
   if (activeCols == null) return beta;
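    // scatter the compact beta back to full width; the intercept stays last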
   double[] res = MemoryManager.malloc8d(_dinfo.fullN() + 1);
   int i = 0;
   for (int c : activeCols) res[c] = beta[i++];
   res[res.length - 1] = beta[beta.length - 1];
   return res;
 }
Example No. 13
 private final double[] resizeVec(
     double[] beta, final int[] activeCols, final int[] oldActiveCols) {
   if (activeCols == null || Arrays.equals(activeCols, oldActiveCols)) return beta;
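    // re-map beta from the old active set to the new one via a full-width temporary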
   double[] full = MemoryManager.malloc8d(_dinfo.fullN() + 1);
   int i = 0;
   for (int c : oldActiveCols) full[c] = beta[i++];
   assert i == beta.length - 1;
   full[full.length - 1] = beta[i];
   return contractVec(full, activeCols);
 }
Example No. 14
 public void setSubmodelIdx(int l) {
   _best_lambda_idx = l;
   _threshold =
       _submodels[l].validation == null ? 0.5f : _submodels[l].validation.best_threshold;
   if (_global_beta == null)
     _global_beta = MemoryManager.malloc8d(this._coefficient_names.length);
   else Arrays.fill(_global_beta, 0);
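    // scatter the submodel's sparse coefficients into the dense global beta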
   int j = 0;
   for (int i : _submodels[l].idxs) _global_beta[i] = _submodels[l].beta[j++];
 }
Example No. 15
 protected void switch_to_doubles() {
   assert _ds == null;
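    // convert the decimal encoding (mantissa _ls, decimal exponent _xs) to plain doubles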
   double[] ds = MemoryManager.malloc8d(sparseLen());
   for (int i = 0; i < sparseLen(); ++i)
     if (isNA2(i) || isCategorical2(i)) ds[i] = Double.NaN;
     else ds[i] = _ls[i] * PrettyPrint.pow10(_xs[i]);
   _ls = null;
   _xs = null;
   _ds = ds;
 }
Example No. 16
 @Override
 public boolean solve(Gram gram, double[] xy, double yy, double[] beta) {
    ADMMSolver admm = new ADMMSolver(_lambda, _alpha, 1e-2);
    // Delegate to ADMM whenever a Gram matrix is supplied. Note that the
    // generalized-gradient path below dereferences gram as well, so it must
    // not be entered with gram == null.
    if (gram != null) return admm.solve(gram, xy, yy, beta);
   Arrays.fill(beta, 0);
   long t1 = System.currentTimeMillis();
   final double[] xb = gram.mul(beta);
   double objval = objectiveVal(xy, yy, beta, xb);
   final double[] newB = MemoryManager.malloc8d(beta.length);
   final double[] newG = MemoryManager.malloc8d(beta.length);
   double step = 1;
   final double l1pen = _lambda * _alpha;
   final double l2pen = _lambda * (1 - _alpha);
   double lsmobjval = lsm_objectiveVal(xy, yy, beta, xb);
   boolean converged = false;
   final int intercept = beta.length - 1;
   int iter = 0;
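    // proximal-gradient (ISTA-style) iterations with a backtracking line search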
   MAIN:
   while (!converged && iter < 1000) {
     ++iter;
     step = 1;
     while (step > 1e-12) { // line search
       double l2shrink = 1 / (1 + step * l2pen);
       double l1shrink = l1pen * step;
       for (int i = 0; i < beta.length - 1; ++i)
         newB[i] = l2shrink * shrinkage((beta[i] - step * (xb[i] - xy[i])), l1shrink);
       newB[intercept] = beta[intercept] - step * (xb[intercept] - xy[intercept]);
       gram.mul(newB, newG);
       double newlsmobj = lsm_objectiveVal(xy, yy, newB, newG);
       double fhat = f_hat(newB, lsmobjval, beta, xb, xy, step);
       if (newlsmobj <= fhat) {
         lsmobjval = newlsmobj;
         converged = betaDiff(beta, newB) < 1e-6;
         System.arraycopy(newB, 0, beta, 0, newB.length);
         System.arraycopy(newG, 0, xb, 0, newG.length);
         continue MAIN;
       } else step *= 0.8;
     }
     converged = true;
   }
   return converged;
 }
Example No. 17
 public void setSubmodelIdx(int l) {
   _best_lambda_idx = l;
   if (_multinomial) {
     _global_beta_multinomial = getNormBetaMultinomial(l);
     for (int i = 0; i < _global_beta_multinomial.length; ++i)
       _global_beta_multinomial[i] = _dinfo.denormalizeBeta(_global_beta_multinomial[i]);
   } else {
     if (_global_beta == null) _global_beta = MemoryManager.malloc8d(_coefficient_names.length);
     else Arrays.fill(_global_beta, 0);
     _submodels[l].getBeta(_global_beta);
     _global_beta = _dinfo.denormalizeBeta(_global_beta);
   }
 }
Example No. 18
 @Override
 boolean set_impl(int i, double d) {
   if (_ls != null) { // Flip to using doubles
     if (_len2 != _len) throw H2O.unimpl();
      double[] ds = MemoryManager.malloc8d(_len);
     for (int j = 0; j < _len; j++)
       ds[j] = (isNA(j) || isEnum(j)) ? Double.NaN : _ls[j] * Math.pow(10, _xs[j]);
     _ds = ds;
     _ls = null;
     _xs = null;
   }
   _ds[i] = d;
   return true;
 }
Example No. 19
 private ParallelSolver(
     Gram g, double[] xy, double[] res, double rho, int iBlock, int rBlock) {
   _iBlock = iBlock;
   _rBlock = rBlock;
   gram = g;
   this.xy = xy;
    this.z = res;
   N = xy.length;
   d = gram._diagAdded;
   this.rho = rho;
   u = MemoryManager.malloc8d(N);
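    // kappa is the soft-threshold level used by the ADMM L1 update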
   kappa = _lambda * _alpha / rho;
   max_iter = (int) (10000 * (250.0 / (1 + xy.length)));
   round = Math.max(20, (int) (max_iter * 0.01));
   _k = round;
 }
Example No. 20
 @Override
 protected boolean chunkInit() {
   final int n_coef = _beta.length;
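    // per-event-time accumulators, sized by the number of distinct times (_n_time) and coefficients (n_coef)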
   sumWeightedCatX = MemoryManager.malloc8d(n_coef - (_dinfo._nums - _n_offsets));
   sumWeightedNumX = MemoryManager.malloc8d(_dinfo._nums);
   sizeRiskSet = MemoryManager.malloc8d(_n_time);
   sizeCensored = MemoryManager.malloc8d(_n_time);
   sizeEvents = MemoryManager.malloc8d(_n_time);
   countEvents = MemoryManager.malloc8(_n_time);
   sumRiskEvents = MemoryManager.malloc8d(_n_time);
   sumLogRiskEvents = MemoryManager.malloc8d(_n_time);
   rcumsumRisk = MemoryManager.malloc8d(_n_time);
   sumXEvents = malloc2DArray(_n_time, n_coef);
   sumXRiskEvents = malloc2DArray(_n_time, n_coef);
   rcumsumXRisk = malloc2DArray(_n_time, n_coef);
   sumXXRiskEvents = malloc3DArray(_n_time, n_coef, n_coef);
   rcumsumXXRisk = malloc3DArray(_n_time, n_coef, n_coef);
   return true;
 }
Example No. 21
 // Slow-path append data
 private void append2slowd() {
   if (_len > Vec.CHUNK_SZ) throw new ArrayIndexOutOfBoundsException(_len);
   assert _ls == null && _len2 == _len;
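    // grow geometrically: start with 4 doubles, then double the capacity on each overflow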
   _ds = _ds == null ? MemoryManager.malloc8d(4) : MemoryManager.arrayCopyOf(_ds, _len << 1);
 }
Example No. 22
   /**
    * Extracts the values, applies standardization to numerics, adds appropriate offsets to
    * categoricals, and adapts the response according to the CaseMode/CaseValue if set.
    */
  @Override
  public final void map(Chunk[] chunks, NewChunk[] outputs) {
    if (_job != null && _job.self() != null && !Job.isRunning(_job.self()))
      throw new JobCancelledException();
    final int nrows = chunks[0]._len;
    final long offset = chunks[0]._start;
    chunkInit();
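     // reusable per-row buffers for numeric values, categorical offsets, and response columns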
    double[] nums = MemoryManager.malloc8d(_dinfo._nums);
    int[] cats = MemoryManager.malloc4(_dinfo._cats);
    double[] response = MemoryManager.malloc8d(_dinfo._responses);
    int start = 0;
    int end = nrows;

    boolean contiguous = false;
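     // contiguous sampling is disabled; rows are skipped at random in the loop below instead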
    Random skip_rng = null; // random generator for skipping rows
    if (_useFraction < 1.0) {
      skip_rng = water.util.Utils.getDeterRNG(new Random().nextLong());
      if (contiguous) {
        final int howmany = (int) Math.ceil(_useFraction * nrows);
        if (howmany > 0) {
          start = skip_rng.nextInt(nrows - howmany);
          end = start + howmany;
        }
        assert (start < nrows);
        assert (end <= nrows);
      }
    }

    long[] shuf_map = null;
    if (_shuffle) {
      shuf_map = new long[end - start];
      for (int i = 0; i < shuf_map.length; ++i) shuf_map[i] = start + i;
      Utils.shuffleArray(shuf_map, new Random().nextLong());
    }

    OUTER:
    for (int rr = start; rr < end; ++rr) {
      final int r = shuf_map != null ? (int) shuf_map[rr - start] : rr;
      if ((_dinfo._nfolds > 0 && (r % _dinfo._nfolds) == _dinfo._foldId)
          || (skip_rng != null && skip_rng.nextFloat() > _useFraction)) continue;
      for (Chunk c : chunks) if (c.isNA0(r)) continue OUTER; // skip rows with NAs!
      int i = 0, ncats = 0;
      for (; i < _dinfo._cats; ++i) {
        int c = (int) chunks[i].at80(r);
        if (c != 0) cats[ncats++] = c + _dinfo._catOffsets[i] - 1;
      }
      final int n = chunks.length - _dinfo._responses;
      for (; i < n; ++i) {
        double d = chunks[i].at0(r);
        if (_dinfo._normMul != null)
          d = (d - _dinfo._normSub[i - _dinfo._cats]) * _dinfo._normMul[i - _dinfo._cats];
        nums[i - _dinfo._cats] = d;
      }
      for (i = 0; i < _dinfo._responses; ++i) {
        response[i] = chunks[chunks.length - _dinfo._responses + i].at0(r);
        if (_dinfo._normRespMul != null)
          response[i] = (response[i] - _dinfo._normRespSub[i]) * _dinfo._normRespMul[i];
      }
      if (outputs != null && outputs.length > 0)
        processRow(offset + r, nums, ncats, cats, response, outputs);
      else processRow(offset + r, nums, ncats, cats, response);
    }
    chunkDone();
  }
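The map loop above only decodes and normalizes each row; subclasses supply the actual work in processRow. As a minimal, hypothetical sketch of that consumer side (the class and member names below are illustrative, not part of the H2O codebase), a task tracking the running mean of the first response column could look like this:

// Hypothetical processRow consumer; the arguments follow the layout produced
// by map() above: `nums` holds the (possibly standardized) numeric values,
// `cats[0..ncats)` the expanded categorical offsets, and `response` the
// (possibly normalized) response values.
final class ResponseMeanAccumulator {
  private double sum; // running sum of the first response
  private long count; // number of rows processed

  public void processRow(long rowId, double[] nums, int ncats, int[] cats, double[] response) {
    sum += response[0];
    ++count;
  }

  public double mean() {
    return count == 0 ? Double.NaN : sum / count;
  }
}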
Example No. 23
 public double[] getNormBeta() {
   return _submodels[_best_lambda_idx].getBeta(MemoryManager.malloc8d(_dinfo.fullN() + 1));
 }
Example No. 24
    @Override
    public GLMModelOutputV3 fillFromImpl(GLMModel.GLMOutput impl) {
      super.fillFromImpl(impl);
      lambda_1se = impl.lambda_1se();
      lambda_best = impl.lambda_best();
      if (impl._multinomial) return fillMultinomial(impl);
      String[] names = impl.coefficientNames().clone();
      // put the intercept first
      String[] ns =
          ArrayUtils.append(new String[] {"Intercept"}, Arrays.copyOf(names, names.length - 1));
      coefficients_table = new TwoDimTableV3();
      final double[] magnitudes;
      double[] beta = impl.beta();
      if (beta == null) beta = MemoryManager.malloc8d(names.length);
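      // with no coefficients available, fall back to an all-zero beta so the table still renders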
      String[] colTypes = new String[] {"double"};
      String[] colFormats = new String[] {"%5f"};
      String[] colnames = new String[] {"Coefficients"};

      if (impl.hasPValues()) {
        colTypes = new String[] {"double", "double", "double", "double"};
        colFormats = new String[] {"%5f", "%5f", "%5f", "%5f"};
        colnames = new String[] {"Coefficients", "Std. Error", "z value", "p value"};
      }
      int stdOff = colnames.length;
      if (impl.isStandardized()) {
        colTypes = ArrayUtils.append(colTypes, "double");
        colFormats = ArrayUtils.append(colFormats, "%5f");
        colnames = ArrayUtils.append(colnames, "Standardized Coefficients");
      }
      TwoDimTable tdt =
          new TwoDimTable(
              "Coefficients", "glm coefficients", ns, colnames, colTypes, colFormats, "names");
      // fill in coefficients

      tdt.set(0, 0, beta[beta.length - 1]);
      for (int i = 0; i < beta.length - 1; ++i) {
        tdt.set(i + 1, 0, beta[i]);
      }
      double[] norm_beta = null;
      if (impl.isStandardized() && impl.beta() != null) {
        norm_beta = impl.getNormBeta();
        tdt.set(0, stdOff, norm_beta[norm_beta.length - 1]);
        for (int i = 0; i < norm_beta.length - 1; ++i) tdt.set(i + 1, stdOff, norm_beta[i]);
      }
      if (impl.hasPValues()) { // fill in p values
        double[] stdErr = impl.stdErr();
        double[] zVals = impl.zValues();
        double[] pVals = impl.pValues();
        tdt.set(0, 1, stdErr[stdErr.length - 1]);
        tdt.set(0, 2, zVals[zVals.length - 1]);
        tdt.set(0, 3, pVals[pVals.length - 1]);
        for (int i = 0; i < stdErr.length - 1; ++i) {
          tdt.set(i + 1, 1, stdErr[i]);
          tdt.set(i + 1, 2, zVals[i]);
          tdt.set(i + 1, 3, pVals[i]);
        }
      }
      coefficients_table.fillFromImpl(tdt);
      if (impl.isStandardized() && impl.beta() != null) {
        magnitudes = norm_beta.clone();
        for (int i = 0; i < magnitudes.length; ++i) if (magnitudes[i] < 0) magnitudes[i] *= -1;
        Integer[] indices = new Integer[magnitudes.length - 1];
        for (int i = 0; i < indices.length; ++i) indices[i] = i;
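         // sort predictor indices by descending coefficient magnitude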
        Arrays.sort(
            indices,
            new Comparator<Integer>() {
              @Override
              public int compare(Integer o1, Integer o2) {
                if (magnitudes[o1] < magnitudes[o2]) return +1;
                if (magnitudes[o1] > magnitudes[o2]) return -1;
                return 0;
              }
            });
        String[] names2 = new String[names.length];
        for (int i = 0; i < names2.length - 1; ++i) names2[i] = names[indices[i]];
        tdt =
            new TwoDimTable(
                "Standardized Coefficient Magnitudes",
                "standardized coefficient magnitudes",
                names2,
                new String[] {"Coefficients", "Sign"},
                new String[] {"double", "string"},
                new String[] {"%5f", "%s"},
                "names");
        for (int i = 0; i < beta.length - 1; ++i) {
          tdt.set(i, 0, magnitudes[indices[i]]);
          tdt.set(i, 1, beta[indices[i]] < 0 ? "NEG" : "POS");
        }
        standardized_coefficient_magnitudes = new TwoDimTableV3();
        standardized_coefficient_magnitudes.fillFromImpl(tdt);
      }
      return this;
    }
Example No. 25
 private void run(final double ymu, final long nobs, LMAXTask lmaxt) {
   String[] warns = null;
   if ((!lambda_search || !strong_rules_enabled) && (_dinfo.fullN() > MAX_PREDICTORS))
     throw new IllegalArgumentException(
         "Too many predictors! GLM can only handle "
             + MAX_PREDICTORS
             + " predictors, got "
             + _dinfo.fullN()
             + ", try to run with strong_rules enabled.");
   if (lambda_search) {
     max_iter = Math.max(300, max_iter);
      assert lmaxt != null : "running lambda search, but don't know what the lambda max is!";
     final double lmax = lmaxt.lmax();
     final double lambda_min_ratio =
         _dinfo._adaptedFrame.numRows() > _dinfo.fullN() ? 0.0001 : 0.01;
     final double d = Math.pow(lambda_min_ratio, 0.01);
     lambda = new double[100];
     lambda[0] = lmax;
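      // geometric grid of 100 lambdas decaying from lambda_max toward lambda_max * lambda_min_ratio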
     for (int i = 1; i < lambda.length; ++i) lambda[i] = lambda[i - 1] * d;
     _runAllLambdas = false;
   } else if (alpha[0] > 0
       && lmaxt
           != null) { // make sure we start with lambda max (and discard all lambda > lambda max)
     final double lmax = lmaxt.lmax();
     int i = 0;
     while (i < lambda.length && lambda[i] > lmax) ++i;
     if (i != 0) {
       Log.info(
           "GLM: removing "
               + i
               + " lambdas > lambda_max: "
               + Arrays.toString(Arrays.copyOf(lambda, i)));
       warns =
           i == lambda.length
               ? new String[] {
                 "Removed " + i + " lambdas > lambda_max",
                 "No lambdas < lambda_max, returning null model."
               }
               : new String[] {"Removed " + i + " lambdas > lambda_max"};
     }
     lambda =
         i == lambda.length
             ? new double[] {lambda_max}
             : Arrays.copyOfRange(lambda, i, lambda.length);
   }
   _model =
       new GLMModel(
           GLM2.this,
           dest(),
           _dinfo,
           _glm,
           beta_epsilon,
           alpha[0],
           lambda_max,
           lambda,
           ymu,
           prior);
   _model.warnings = warns;
   _model.clone().delete_and_lock(self());
   if (lambda[0] == lambda_max && alpha[0] > 0) { // fill-in trivial solution for lambda max
     _beta = MemoryManager.malloc8d(_dinfo.fullN() + 1);
     _beta[_beta.length - 1] = _glm.link(ymu) + _iceptAdjust;
     _model.setLambdaSubmodel(0, _beta, _beta, 0);
     if (lmaxt != null) _model.setAndTestValidation(0, lmaxt._val);
     _lambdaIdx = 1;
   }
   if (_lambdaIdx == lambda.length) // ran only with one lambda > lambda_max => return null model
   GLM2.this.complete(); // signal we're done to anyone waiting for the job
   else {
     ++_iter;
     if (lmaxt != null && strong_rules_enabled)
       activeCols(lambda[_lambdaIdx], lmaxt.lmax(), lmaxt.gradient(l2pen()));
      Log.info(
          "GLM2 starting GLM after "
              + (System.currentTimeMillis() - start)
              + "ms of preprocessing (mean/lmax/strong rules computation)");
     new GLMIterationTask(
             GLM2.this,
             _activeData,
             _glm,
             true,
             false,
             false,
             null,
             _ymu = ymu,
             _reg = 1.0 / nobs,
             new Iteration())
         .asyncExec(_activeData._adaptedFrame);
   }
 }
Example No. 26
    @Override
    public void callback(final GLMIterationTask glmt) {
      _model.stop_training();
      Log.info(
          "GLM2 iteration("
              + _iter
              + ") done in "
              + (System.currentTimeMillis() - _iterationStartTime)
              + "ms");
      if (!isRunning(self())) throw new JobCancelledException();
      currentLambdaIter++;
      if (glmt._val != null) {
        if (!(glmt._val.residual_deviance
            < glmt._val
                .null_deviance)) { // complete failure; see whether we can restart with higher_accuracy on
          if (!highAccuracy()) {
            Log.info(
                "GLM2 reached negative explained deviance without line-search, rerunning with high accuracy settings.");
            setHighAccuracy();
            if (_lastResult != null)
              new GLMIterationTask(
                      GLM2.this,
                      _activeData,
                      glmt._glm,
                      true,
                      true,
                      true,
                      _lastResult._glmt._beta,
                      _ymu,
                      _reg,
                      new Iteration())
                  .asyncExec(_activeData._adaptedFrame);
            else if (_lambdaIdx > 2) // > 2 because 0 is the null model, and we don't want to run with that
            new GLMIterationTask(
                      GLM2.this,
                      _activeData,
                      glmt._glm,
                      true,
                      true,
                      true,
                      _model.submodels[_lambdaIdx - 1].norm_beta,
                      _ymu,
                      _reg,
                      new Iteration())
                  .asyncExec(_activeData._adaptedFrame);
            else // no sane solution to go back to, start from scratch!
            new GLMIterationTask(
                      GLM2.this,
                      _activeData,
                      glmt._glm,
                      true,
                      false,
                      false,
                      null,
                      _ymu,
                      _reg,
                      new Iteration())
                  .asyncExec(_activeData._adaptedFrame);
            _lastResult = null;
            return;
          }
        }
        _model.setAndTestValidation(_lambdaIdx, glmt._val);
        _model.clone().update(self());
      }

      if (glmt._val != null && glmt._computeGradient) { // check gradient
        final double[] grad = glmt.gradient(l2pen());
        ADMMSolver.subgrad(alpha[0], lambda[_lambdaIdx], glmt._beta, grad);
        double err = 0;
        for (double d : grad)
          if (d > err) err = d;
          else if (d < -err) err = -d;
        Log.info("GLM2 gradient after " + _iter + " iterations = " + err);
        if (err <= GLM_GRAD_EPS) {
          Log.info(
              "GLM2 converged by reaching small enough gradient, with max |subgradient| = " + err);
          setNewBeta(glmt._beta);
          nextLambda(glmt, glmt._beta);
          return;
        }
      }
      if (glmt._beta != null
          && glmt._val != null
          && glmt._computeGradient
          && _glm.family != Family.tweedie) {
        if (_lastResult != null && needLineSearch(glmt._beta, objval(glmt), 1)) {
          if (!highAccuracy()) {
            setHighAccuracy();
            if (_lastResult._iter
                < (_iter - 2)) { // there is a gap since the last result... return to it and start again
              final double[] prevBeta =
                  _lastResult._activeCols != _activeCols
                      ? resizeVec(_lastResult._glmt._beta, _activeCols, _lastResult._activeCols)
                      : _lastResult._glmt._beta;
              new GLMIterationTask(
                      GLM2.this,
                      _activeData,
                      glmt._glm,
                      true,
                      true,
                      true,
                      prevBeta,
                      _ymu,
                      _reg,
                      new Iteration())
                  .asyncExec(_activeData._adaptedFrame);
              return;
            }
          }
          final double[] b =
              resizeVec(_lastResult._glmt._beta, _activeCols, _lastResult._activeCols);
          assert (b.length == glmt._beta.length)
              : b.length + " != " + glmt._beta.length + ", activeCols = " + _activeCols.length;
          new GLMTask.GLMLineSearchTask(
                  GLM2.this,
                  _activeData,
                  _glm,
                  resizeVec(_lastResult._glmt._beta, _activeCols, _lastResult._activeCols),
                  glmt._beta,
                  1e-4,
                  glmt._nobs,
                  alpha[0],
                  lambda[_lambdaIdx],
                  new LineSearchIteration())
              .asyncExec(_activeData._adaptedFrame);
          return;
        }
        _lastResult = new IterationInfo(GLM2.this._iter - 1, glmt, _activeCols);
      }
      final double[] newBeta = MemoryManager.malloc8d(glmt._xy.length);
      ADMMSolver slvr = new ADMMSolver(lambda[_lambdaIdx], alpha[0], ADMM_GRAD_EPS, _addedL2);
      slvr.solve(glmt._gram, glmt._xy, glmt._yy, newBeta);
      _addedL2 = slvr._addedL2;
      if (Utils.hasNaNsOrInfs(newBeta)) {
        Log.info("GLM2 forcibly converged by getting NaNs and/or Infs in beta");
        nextLambda(glmt, glmt._beta);
      } else {
        setNewBeta(newBeta);
        final double bdiff = beta_diff(glmt._beta, newBeta);
        if (_glm.family == Family.gaussian
            || bdiff < beta_epsilon
            || _iter
                == max_iter) { // Gaussian is non-iterative and gradient is ADMMSolver's gradient =>
          // just validate and move on to the next lambda
          int diff = (int) Math.log10(bdiff);
          int nzs = 0;
          for (int i = 0; i < newBeta.length; ++i) if (newBeta[i] != 0) ++nzs;
          if (newBeta.length < 20) System.out.println("beta = " + Arrays.toString(newBeta));
          Log.info(
              "GLM2 (lambda_"
                  + _lambdaIdx
                  + "="
                  + lambda[_lambdaIdx]
                  + ") converged (reached a fixed point with ~ 1e"
                  + diff
                  + " precision) after "
                  + _iter
                  + " iterations, got "
                  + nzs
                  + " nzs");
          nextLambda(glmt, newBeta);
        } else { // not done yet, launch next iteration
          final boolean validate = higher_accuracy || (currentLambdaIter % 5) == 0;
          ++_iter;
          System.out.println("Iter = " + _iter);
          new GLMIterationTask(
                  GLM2.this,
                  _activeData,
                  glmt._glm,
                  true,
                  validate,
                  validate,
                  newBeta,
                  _ymu,
                  _reg,
                  new Iteration())
              .asyncExec(_activeData._adaptedFrame);
        }
      }
    }
Example No. 27
 public double[] nullModelBeta(FrameTask.DataInfo dinfo, double ymu) {
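    // only the intercept is non-zero: the link function applied to the response mean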
   double[] res = MemoryManager.malloc8d(dinfo.fullN() + 1);
   res[res.length - 1] = link(ymu);
   return res;
 }
Example No. 28
 @Override
 public void callback(final GLMIterationTask glmt) {
   if (!isRunning(self())) throw new JobCancelledException();
   boolean converged = false;
   if (glmt._beta != null && glmt._val != null && _glm.family != Family.tweedie) {
     glmt._val.finalize_AIC_AUC();
     _model.setAndTestValidation(_lambdaIdx, glmt._val); // .store();
     _model.clone().update(self());
     converged = true;
     double l1pen = alpha[0] * lambda[_lambdaIdx] * glmt._n;
     double l2pen = (1 - alpha[0]) * lambda[_lambdaIdx] * glmt._n;
     final double eps = 1e-2;
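      // KKT/subgradient check: at an elastic-net optimum the penalized (sub)gradient must vanish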
     for (int i = 0; i < glmt._grad.length - 1; ++i) { // add l2 reg. term to the gradient
       glmt._grad[i] += l2pen * glmt._beta[i];
       if (glmt._beta[i] < 0) converged &= Math.abs(glmt._grad[i] - l1pen) < eps;
       else if (glmt._beta[i] > 0) converged &= Math.abs(glmt._grad[i] + l1pen) < eps;
       else converged &= LSMSolver.shrinkage(glmt._grad[i], l1pen + eps) == 0;
     }
     if (converged) Log.info("GLM converged by reaching 0 gradient/subgradient.");
     double objval = glmt._val.residual_deviance + 0.5 * l2pen * l2norm(glmt._beta);
     if (!converged && _lastResult != null && needLineSearch(glmt._beta, objval, 1)) {
       new GLMTask.GLMLineSearchTask(
               GLM2.this,
               _dinfo,
               _glm,
               _lastResult._glmt._beta,
               glmt._beta,
               1e-8,
               new LineSearchIteration())
           .asyncExec(_dinfo._adaptedFrame);
       return;
     }
     _lastResult = new IterationInfo(GLM2.this._iter - 1, objval, glmt);
   }
   double[] newBeta =
       glmt._beta != null ? glmt._beta.clone() : MemoryManager.malloc8d(glmt._xy.length);
   double[] newBetaDeNorm = null;
   ADMMSolver slvr = new ADMMSolver(lambda[_lambdaIdx], alpha[0], _addedL2);
   slvr.solve(glmt._gram, glmt._xy, glmt._yy, newBeta);
   _addedL2 = slvr._addedL2;
   if (Utils.hasNaNsOrInfs(newBeta)) {
     Log.info("GLM forcibly converged by getting NaNs and/or Infs in beta");
   } else {
     if (_dinfo._standardize) {
       newBetaDeNorm = newBeta.clone();
       double norm = 0.0; // Reverse any normalization on the intercept
       // denormalize only the numeric coefs (categoricals are not normalized)
       final int numoff = newBeta.length - _dinfo._nums - 1;
       for (int i = numoff; i < newBeta.length - 1; i++) {
         double b = newBetaDeNorm[i] * _dinfo._normMul[i - numoff];
         norm += b * _dinfo._normSub[i - numoff]; // Also accumulate the intercept adjustment
         newBetaDeNorm[i] = b;
       }
       newBetaDeNorm[newBetaDeNorm.length - 1] -= norm;
     }
     _model.setLambdaSubmodel(
         _lambdaIdx,
         newBetaDeNorm == null ? newBeta : newBetaDeNorm,
         newBetaDeNorm == null ? null : newBeta,
         _iter);
     if (beta_diff(glmt._beta, newBeta) < beta_epsilon) {
       Log.info("GLM converged by reaching fixed-point.");
       converged = true;
     }
     if (!converged && _glm.family != Family.gaussian && _iter < max_iter) {
       ++_iter;
       new GLMIterationTask(GLM2.this, _dinfo, glmt._glm, newBeta, _ymu, _reg, new Iteration())
           .asyncExec(_dinfo._adaptedFrame);
       return;
     }
   }
   // done with this lambda
   nextLambda(glmt);
 }
Example No. 29
 protected void set_sparse(int nzeros) {
   if (sparseLen() == nzeros && _len != 0) return;
    if (_id != null) { // we have a sparse representation, but with some zeros stored in it
     int[] id = MemoryManager.malloc4(nzeros);
     int j = 0;
     if (_ds != null) {
       double[] ds = MemoryManager.malloc8d(nzeros);
       for (int i = 0; i < sparseLen(); ++i) {
         if (_ds[i] != 0) {
           ds[j] = _ds[i];
           id[j] = _id[i];
           ++j;
         }
       }
       _ds = ds;
     } else if (_is != null) {
       int[] is = MemoryManager.malloc4(nzeros);
       for (int i = 0; i < sparseLen(); i++) {
         if (_is[i] != -1) {
           is[j] = _is[i];
            id[j] = _id[i];
            ++j;
          }
        }
        _is = is;
     } else {
       long[] ls = MemoryManager.malloc8(nzeros);
       int[] xs = MemoryManager.malloc4(nzeros);
       for (int i = 0; i < sparseLen(); ++i) {
         if (_ls[i] != 0) {
           ls[j] = _ls[i];
           xs[j] = _xs[i];
           id[j] = _id[i];
           ++j;
         }
       }
       _ls = ls;
       _xs = xs;
     }
     _id = id;
     assert j == nzeros;
     set_sparseLen(nzeros);
     return;
   }
    assert sparseLen() == _len
        : "sparseLen = " + sparseLen() + ", _len = " + _len + ", nzeros = " + nzeros;
   int zs = 0;
   if (_is != null) {
     assert nzeros < _is.length;
     _id = MemoryManager.malloc4(_is.length);
     for (int i = 0; i < sparseLen(); i++) {
       if (_is[i] == -1) zs++;
       else {
         _is[i - zs] = _is[i];
         _id[i - zs] = i;
       }
     }
   } else if (_ds == null) {
     if (_len == 0) {
       _ls = new long[0];
       _xs = new int[0];
       _id = new int[0];
       set_sparseLen(0);
       return;
     } else {
       assert nzeros < sparseLen();
       _id = alloc_indices(_ls.length);
       for (int i = 0; i < sparseLen(); ++i) {
         if (_ls[i] == 0 && _xs[i] == 0) ++zs;
         else {
           _ls[i - zs] = _ls[i];
           _xs[i - zs] = _xs[i];
           _id[i - zs] = i;
         }
       }
     }
   } else {
     assert nzeros < _ds.length;
     _id = alloc_indices(_ds.length);
     for (int i = 0; i < sparseLen(); ++i) {
       if (_ds[i] == 0) ++zs;
       else {
         _ds[i - zs] = _ds[i];
         _id[i - zs] = i;
       }
     }
   }
   assert zs == (sparseLen() - nzeros);
   set_sparseLen(nzeros);
 }
Example No. 30
 double[] alloc_doubles(int l) {
   return _ds = MemoryManager.malloc8d(l);
 }