Code Example #1
File: KMeans.java Project: huamichaelchen/h2o-3
    // Stopping criteria
    boolean isDone(KMeansModel model, double[][] newCenters, double[][] oldCenters) {
      if (!isRunning()) return true; // Stopped/cancelled
      // Stop when we run out of iterations
      if (model._output._iterations >= _parms._max_iterations) return true;

      // Compute average change in standardized cluster centers
      if (oldCenters == null) return false; // No prior iteration, not stopping
      double average_change = 0;
      for (int clu = 0; clu < _parms._k; clu++)
        average_change +=
            hex.genmodel.GenModel.KMeans_distance(
                oldCenters[clu], newCenters[clu], _isCats, null, null);
      average_change /= _parms._k; // Average change per cluster
      model._output._avg_centroids_chg =
          ArrayUtils.copyAndFillOf(
              model._output._avg_centroids_chg,
              model._output._avg_centroids_chg.length + 1,
              average_change);
      model._output._training_time_ms =
          ArrayUtils.copyAndFillOf(
              model._output._training_time_ms,
              model._output._training_time_ms.length + 1,
              System.currentTimeMillis());
      return average_change < TOLERANCE;
    }
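
The stopping rule above averages the per-cluster movement of the (standardized) centers between consecutive iterations and stops once it falls below a fixed tolerance; in formula form:

\[ \frac{1}{k} \sum_{c=1}^{k} d(\text{old}_c, \text{new}_c) < \text{TOLERANCE} \]

where d is the same KMeans_distance used during scoring.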
Code Example #2
  /**
   * Compute variable importance, based on Gedeon: "Data Mining of Inputs: Analysing Magnitude and
   * Functional Measures"
   *
   * @return variable importances for input features
   */
  public float[] computeVariableImportances() {
    float[] vi = new float[units[0]];
    Arrays.fill(vi, 0f);

    float[][] Qik = new float[units[0]][units[2]]; // importance of input i on output k
    float[] sum_wj = new float[units[1]]; // sum of incoming weights into first hidden layer
    float[] sum_wk =
        new float[units[2]]; // sum of incoming weights into output layer (or second hidden layer)
    for (float[] Qi : Qik) Arrays.fill(Qi, 0f);
    Arrays.fill(sum_wj, 0f);
    Arrays.fill(sum_wk, 0f);

    // compute sum of absolute incoming weights
    for (int j = 0; j < units[1]; j++) {
      for (int i = 0; i < units[0]; i++) {
        float wij = get_weights(0).get(j, i);
        sum_wj[j] += Math.abs(wij);
      }
    }
    for (int k = 0; k < units[2]; k++) {
      for (int j = 0; j < units[1]; j++) {
        float wjk = get_weights(1).get(k, j);
        sum_wk[k] += Math.abs(wjk);
      }
    }
    // compute importance of input i on output k as product of connecting weights going through j
    for (int i = 0; i < units[0]; i++) {
      for (int k = 0; k < units[2]; k++) {
        for (int j = 0; j < units[1]; j++) {
          float wij = get_weights(0).get(j, i);
          float wjk = get_weights(1).get(k, j);
          // Qik[i][k] += Math.abs(wij)/sum_wj[j] * wjk; //Wong,Gedeon,Taggart '95
          Qik[i][k] += Math.abs(wij) / sum_wj[j] * Math.abs(wjk) / sum_wk[k]; // Gedeon '97
        }
      }
    }
    // normalize Qik over all outputs k
    for (int k = 0; k < units[2]; k++) {
      float sumQk = 0;
      for (int i = 0; i < units[0]; i++) sumQk += Qik[i][k];
      for (int i = 0; i < units[0]; i++) Qik[i][k] /= sumQk;
    }
    // importance for feature i is the sum over k of i->k importances
    for (int i = 0; i < units[0]; i++) vi[i] = ArrayUtils.sum(Qik[i]);

    // normalize importances such that max(vi) = 1
    ArrayUtils.div(vi, ArrayUtils.maxValue(vi));

    // zero out missing categorical variables if they were never seen
    if (_saw_missing_cats != null) {
      for (int i = 0; i < _saw_missing_cats.length; ++i) {
        assert (data_info._catMissing[i] == 1); // have a missing bucket for each categorical
        if (!_saw_missing_cats[i]) vi[data_info._catOffsets[i + 1] - 1] = 0;
      }
    }
    return vi;
  }
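
In formula form, the triple loop above implements the Gedeon '97 measure: the importance of input i on output k is the sum over hidden units j of normalized absolute weight products,

\[ Q_{ik} = \sum_{j} \frac{|w_{ij}|}{\sum_{i'} |w_{i'j}|} \cdot \frac{|w_{jk}|}{\sum_{j'} |w_{j'k}|} \]

after which each column Q_{.k} is normalized to sum to 1 over the inputs, the per-input importances are summed over k, and the result is rescaled so the maximum importance is 1.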
Code Example #3
File: GLRMModel.java Project: vijaykiran/h2o-3
  /**
   * Project each archetype into original feature space
   *
   * @param frame Original training data with m rows and n columns
   * @param destination_key Frame Id for output
   * @param reverse_transform Whether to map standardized numeric columns back to the original scale
   * @return Frame containing k rows and n columns, where each row corresponds to an archetype
   */
  public Frame scoreArchetypes(Frame frame, Key destination_key, boolean reverse_transform) {
    final int ncols = _output._names.length;
    Frame adaptedFr = new Frame(frame);
    adaptTestForTrain(adaptedFr, true, false);
    assert ncols == adaptedFr.numCols();
    String[][] adaptedDomme = adaptedFr.domains();
    double[][] proj = new double[_parms._k][_output._nnums + _output._ncats];

    // Categorical columns
    for (int d = 0; d < _output._ncats; d++) {
      double[][] block = _output._archetypes_raw.getCatBlock(d);
      for (int k = 0; k < _parms._k; k++)
        proj[k][_output._permutation[d]] = _parms.mimpute(block[k], _output._lossFunc[d]);
    }

    // Numeric columns
    for (int d = _output._ncats; d < (_output._ncats + _output._nnums); d++) {
      int ds = d - _output._ncats;
      for (int k = 0; k < _parms._k; k++) {
        double num = _output._archetypes_raw.getNum(ds, k);
        proj[k][_output._permutation[d]] = _parms.impute(num, _output._lossFunc[d]);
        if (reverse_transform)
          proj[k][_output._permutation[d]] =
              proj[k][_output._permutation[d]] / _output._normMul[ds] + _output._normSub[ds];
      }
    }

    // Convert projection of archetypes into a frame with correct domains
    Frame f =
        ArrayUtils.frame(
            (null == destination_key ? Key.make() : destination_key), adaptedFr.names(), proj);
    for (int i = 0; i < ncols; i++) f.vec(i).setDomain(adaptedDomme[i]);
    return f;
  }
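
A note on reverse_transform: the division-then-shift in the numeric branch is consistent with standardization of the form x_std = (x - normSub) * normMul, so the projection is mapped back to the original scale via

\[ x = \frac{x_{\text{std}}}{\text{normMul}} + \text{normSub} \]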
Code Example #4
File: SharedTree.java Project: herberteuler/h2o-3
 // Call the builder-specific scoring code, then correct the probabilities if necessary.
 void score2(Chunk[] chks, double weight, double offset, double[] fs /* nclass */, int row) {
   double sum = score1(chks, weight, offset, fs, row);
   if (isClassifier()) {
     if (!Double.isInfinite(sum) && sum > 0f && sum != 1f) ArrayUtils.div(fs, sum);
     if (_parms._balance_classes)
       GenModel.correctProbabilities(
           fs, _model._output._priorClassDist, _model._output._modelClassDist);
   }
 }
Code Example #5
File: KMeans.java Project: huamichaelchen/h2o-3
 @Override
 public void reduce(Lloyds mr) {
   for (int clu = 0; clu < _k; clu++) {
     long ra = _size[clu];
     long rb = mr._size[clu];
     double[] ma = _cMeans[clu];
     double[] mb = mr._cMeans[clu];
     if (ra + rb > 0) // Recursive (weighted) mean of the two partial results
       for (int c = 0; c < ma.length; c++)
         ma[c] = (ma[c] * ra + mb[c] * rb) / (ra + rb);
   }
   ArrayUtils.add(_cats, mr._cats);
   ArrayUtils.add(_cSqr, mr._cSqr);
   ArrayUtils.add(_size, mr._size);
   // track global worst-row
   if (_worst_err < mr._worst_err) {
     _worst_err = mr._worst_err;
     _worst_row = mr._worst_row;
   }
 }
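
The "recursive mean" update merges two partial means without revisiting the rows: for partial counts r_a, r_b and partial means m_a, m_b, the merged mean per coordinate is

\[ m = \frac{r_a m_a + r_b m_b}{r_a + r_b} \]

which is exactly the guarded update inside the loop.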
Code Example #6
File: GLRMModel.java Project: vijaykiran/h2o-3
 public static int mimpute(double[] u, Loss multi_loss) {
   assert multi_loss.isForCategorical()
       : "Loss function " + multi_loss + " not applicable to categoricals";
   switch (multi_loss) {
     case Categorical:
     case Ordinal:
       double[] cand = new double[u.length];
       for (int a = 0; a < cand.length; a++) cand[a] = mloss(u, a, multi_loss);
       return ArrayUtils.minIndex(cand);
     default:
       throw new RuntimeException("Unknown multidimensional loss function " + multi_loss);
   }
 }
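
A minimal usage sketch (values hypothetical; assumes a call site where mimpute and the Loss enum are in scope): given the reconstructed scores for each level of a categorical column, mimpute returns the index of the level with the smallest multidimensional loss.

   double[] u = {0.1, 2.5, -0.3};            // reconstructed score per categorical level
   int level = mimpute(u, Loss.Categorical); // index of the level minimizing mloss(u, a, ...)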
Code Example #7
File: KMeans.java Project: huamichaelchen/h2o-3
    // Compute all interesting KMeans stats (errors & variances of clusters,
    // etc).  Return new centers.
    double[][] computeStatsFillModel(
        Lloyds task,
        KMeansModel model,
        final Vec[] vecs,
        final double[] means,
        final double[] mults,
        final int[] modes) {
      // Fill in the model based on original destandardized centers
      if (model._parms._standardize) {
        model._output._centers_std_raw = task._cMeans;
      }
      model._output._centers_raw = destandardize(task._cMeans, _isCats, means, mults);
      model._output._size = task._size;
      model._output._withinss = task._cSqr;
      double ssq = 0; // total within-cluster sum of squares
      for (int i = 0; i < _parms._k; i++)
        ssq += model._output._withinss[i]; // summed over all clusters
      model._output._tot_withinss = ssq;

      // Sum-of-square distance from grand mean
      if (_parms._k == 1) model._output._totss = model._output._tot_withinss;
      else {
        // If data already standardized, grand mean is just the origin
        TotSS totss =
            new TotSS(means, mults, modes, _parms.train().domains(), _parms.train().cardinality())
                .doAll(vecs);
        model._output._totss = totss._tss;
      }
      model._output._betweenss =
          model._output._totss - model._output._tot_withinss; // between-cluster sum of squares
      model._output._iterations++;

      // add to scoring history
      model._output._history_withinss =
          ArrayUtils.copyAndFillOf(
              model._output._history_withinss,
              model._output._history_withinss.length + 1,
              model._output._tot_withinss);

      // Two small TwoDimTables - cheap
      model._output._model_summary = createModelSummaryTable(model._output);
      model._output._scoring_history = createScoringHistoryTable(model._output);

      // Take the cluster stats from the model, and assemble them into a model metrics object
      model._output._training_metrics = makeTrainingMetrics(model);

      return task._cMeans; // New centers
    }
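
The betweenss bookkeeping relies on the standard sum-of-squares decomposition: the total sum of squared distances from the grand mean splits into within-cluster and between-cluster parts,

\[ \text{totss} = \text{tot\_withinss} + \text{betweenss} \]

so betweenss can be obtained by subtraction once totss and the per-cluster withinss sums are known.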
Code Example #8
 /** Unique identifier for this model's state, based on raw numbers */
 protected long checksum_impl() {
   long cs = parameters._seed;
   cs ^= size() * get_processed_total();
   cs ^= (long) (2234.3424 * ArrayUtils.sum(mean_bias));
   cs *= (long) (9234.1343 * ArrayUtils.sum(rms_bias));
   cs ^= (long) (9723.9734 * ArrayUtils.sum(mean_weight));
   cs *= (long) (9234.1783 * ArrayUtils.sum(rms_weight));
   cs ^= (long) (4273.2344 * (Math.E + ArrayUtils.sum(mean_rate)));
   cs *= (long) (3378.1999 * (Math.PI + ArrayUtils.sum(rms_rate)));
   return cs;
 }
Code Example #9
File: SharedTree.java Project: herberteuler/h2o-3
 // FIXME: Use weights
 double initial_MSE(Vec train, Vec test) {
   if (train.isEnum()) {
     // Guess the most populous class; call its fraction Q.  Then Q of the
     // rows are "mostly correct" - error is (1-Q) per element.  The
     // remaining 1-Q rows are "mostly wrong" - error is Q (our guess,
     // which is wrong).
     int cls = ArrayUtils.maxIndex(train.bins());
     double guess = (double) train.bins()[cls] / (train.length() - train.naCnt());
     double actual = (double) test.bins()[cls] / (test.length() - test.naCnt());
     return guess * guess + actual - 2.0 * actual * guess;
   } else { // Regression
     // Guessing the training data mean, but actual is validation set mean
     double stddev = test.sigma();
     double bias = train.mean() - test.mean();
     return stddev * stddev + bias * bias;
   }
 }
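
The classification branch follows from expected squared error. With guess fraction g (train) and actual fraction a (test), a row belongs to the guessed class with probability a, contributing error (1-g)^2, and otherwise contributes g^2:

\[ a(1-g)^2 + (1-a)g^2 = g^2 + a - 2ag \]

which is exactly the returned expression.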
Code Example #10
File: KMeans.java Project: huamichaelchen/h2o-3
    @Override
    public void map(Chunk[] cs) {
      int N = cs.length - (_hasWeight ? 1 : 0);
      assert _centers[0].length == N;
      _cMeans = new double[_k][N];
      _cSqr = new double[_k];
      _size = new long[_k];
      // Space for cat histograms
      _cats = new long[_k][N][];
      for (int clu = 0; clu < _k; clu++)
        for (int col = 0; col < N; col++)
          _cats[clu][col] = _isCats[col] == null ? null : new long[cs[col].vec().cardinality()];
      _worst_err = 0;

      // Find closest cluster center for each row
      double[] values = new double[N]; // Temp data to hold row as doubles
      ClusterDist cd = new ClusterDist();
      for (int row = 0; row < cs[0]._len; row++) {
        double weight = _hasWeight ? cs[N].atd(row) : 1;
        if (weight == 0) continue; // skip holdout rows
        assert (weight == 1); // K-Means only works for weight 1 (or weight 0 for holdout)
        data(values, cs, row, _means, _mults, _modes); // Load row as doubles
        closest(_centers, values, _isCats, cd); // Find closest cluster center
        int clu = cd._cluster;
        assert clu != -1; // No broken rows
        _cSqr[clu] += cd._dist;

        // Add values and increment counter for chosen cluster
        for (int col = 0; col < N; col++)
          if (_isCats[col] != null) _cats[clu][col][(int) values[col]]++; // Histogram the cats
          else _cMeans[clu][col] += values[col]; // Sum the column centers
        _size[clu]++;
        // Track worst row
        if (cd._dist > _worst_err) {
          _worst_err = cd._dist;
          _worst_row = cs[0].start() + row;
        }
      }
      // Scale back down to local mean
      for (int clu = 0; clu < _k; clu++)
        if (_size[clu] != 0) ArrayUtils.div(_cMeans[clu], _size[clu]);
      _centers = null;
      _means = _mults = null;
      _modes = null;
    }
Code Example #11
 /**
  * Divide all weights/biases by a real-valued number
  *
  * @param N The divisor; every stored value is divided by N
  */
 protected void div(float N) {
   for (int i = 0; i < dense_row_weights.length; ++i) ArrayUtils.div(get_weights(i).raw(), N);
   for (Storage.Vector bias : biases) ArrayUtils.div(bias.raw(), N);
   if (avg_activations != null)
     for (Storage.Vector avgac : avg_activations) ArrayUtils.div(avgac.raw(), N);
   if (has_momenta()) {
     for (int i = 0; i < dense_row_weights_momenta.length; ++i)
       ArrayUtils.div(get_weights_momenta(i).raw(), N);
     for (Storage.Vector bias_momenta : biases_momenta) ArrayUtils.div(bias_momenta.raw(), N);
   }
   if (adaDelta()) {
     for (int i = 0; i < dense_row_ada_dx_g.length; ++i) {
       ArrayUtils.div(get_ada_dx_g(i).raw(), N);
     }
   }
 }
Code Example #12
 /**
  * Add another model info into this one. This adds the weights/biases/learning-rate helpers and
  * the number of processed training samples. Note: it will NOT add the elastic averaging helpers,
  * which are always kept constant (they are already the result of a reduction).
  *
  * @param other The model info whose values are added into this one
  */
 public void add(DeepLearningModelInfo other) {
   for (int i = 0; i < dense_row_weights.length; ++i)
     ArrayUtils.add(get_weights(i).raw(), other.get_weights(i).raw());
   for (int i = 0; i < biases.length; ++i) ArrayUtils.add(biases[i].raw(), other.biases[i].raw());
   if (avg_activations != null)
     for (int i = 0; i < avg_activations.length; ++i)
        ArrayUtils.add(avg_activations[i].raw(), other.avg_activations[i].raw());
   if (has_momenta()) {
     assert (other.has_momenta());
     for (int i = 0; i < dense_row_weights_momenta.length; ++i)
       ArrayUtils.add(get_weights_momenta(i).raw(), other.get_weights_momenta(i).raw());
     for (int i = 0; i < biases_momenta.length; ++i)
       ArrayUtils.add(biases_momenta[i].raw(), other.biases_momenta[i].raw());
   }
   if (adaDelta()) {
     assert (other.adaDelta());
     for (int i = 0; i < dense_row_ada_dx_g.length; ++i) {
       ArrayUtils.add(get_ada_dx_g(i).raw(), other.get_ada_dx_g(i).raw());
     }
   }
   add_processed_local(other.get_processed_local());
 }
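
Together with div(float N) above, add() supports simple parameter averaging across compute nodes; a minimal sketch (the node-local infos m1 and m2 are hypothetical):

   DeepLearningModelInfo avg = m1;  // accumulate into m1
   avg.add(m2);                     // elementwise sum of weights, biases, and helpers
   avg.div(2f);                     // divide by the number of summed models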
Code Example #13
 @Override
 public void reduce(CoxPHTask that) {
   n += that.n;
   sumWeights += that.sumWeights;
   ArrayUtils.add(sumWeightedCatX, that.sumWeightedCatX);
   ArrayUtils.add(sumWeightedNumX, that.sumWeightedNumX);
   ArrayUtils.add(sizeRiskSet, that.sizeRiskSet);
   ArrayUtils.add(sizeCensored, that.sizeCensored);
   ArrayUtils.add(sizeEvents, that.sizeEvents);
   ArrayUtils.add(countEvents, that.countEvents);
   ArrayUtils.add(sumXEvents, that.sumXEvents);
   ArrayUtils.add(sumRiskEvents, that.sumRiskEvents);
   ArrayUtils.add(sumXRiskEvents, that.sumXRiskEvents);
   ArrayUtils.add(sumXXRiskEvents, that.sumXXRiskEvents);
   ArrayUtils.add(sumLogRiskEvents, that.sumLogRiskEvents);
   ArrayUtils.add(rcumsumRisk, that.rcumsumRisk);
   ArrayUtils.add(rcumsumXRisk, that.rcumsumXRisk);
   ArrayUtils.add(rcumsumXXRisk, that.rcumsumXXRisk);
 }
Code Example #14
File: PojoUtils.java Project: zxsted/h2o-3
  /**
   * Copy properties "of the same name" from one POJO to the other. If the fields are named
   * consistently (both sides have fields named "_foo" and/or "bar") this acts like Apache Commons
   * BeanUtils.copyProperties(). If one side has leading underscores and the other does not then the
   * names are conformed according to the field_naming parameter.
   *
   * @param dest Destination POJO
   * @param origin Origin POJO
   * @param field_naming Are the fields named consistently, or does one side have underscores?
   * @param skip_fields Array of origin or destination field names to skip
   * @param only_fields Array of origin or destination field names to include; ones not in this list
   *     will be skipped
   */
  public static void copyProperties(
      Object dest,
      Object origin,
      FieldNaming field_naming,
      String[] skip_fields,
      String[] only_fields) {
    if (null == dest || null == origin) return;

    Field[] dest_fields = Weaver.getWovenFields(dest.getClass());
    Field[] orig_fields = Weaver.getWovenFields(origin.getClass());

    for (Field orig_field : orig_fields) {
      String origin_name = orig_field.getName();

      if (skip_fields != null && ArrayUtils.contains(skip_fields, origin_name)) continue;

      if (only_fields != null && !ArrayUtils.contains(only_fields, origin_name)) continue;

      String dest_name = null;
      if (field_naming == FieldNaming.CONSISTENT) {
        dest_name = origin_name;
      } else if (field_naming == FieldNaming.DEST_HAS_UNDERSCORES) {
        dest_name = "_" + origin_name;
      } else if (field_naming == FieldNaming.ORIGIN_HAS_UNDERSCORES) {
        dest_name = origin_name.substring(1);
      }

      if (skip_fields != null && ArrayUtils.contains(skip_fields, dest_name)) continue;

      if (only_fields != null && !ArrayUtils.contains(only_fields, dest_name)) continue;

      try {
        Field dest_field = null;
        for (Field fd : dest_fields) {
          if (fd.getName().equals(dest_name)) {
            dest_field = fd;
            break;
          }
        }

        if (dest_field != null) {
          dest_field.setAccessible(true);
          orig_field.setAccessible(true);
          // Log.info("PojoUtils.copyProperties, origin field: " + orig_field + "; destination
          // field: " + dest_field);
          if (null == orig_field.get(origin)) {
            //
            // Assigning null to dest.
            //
            dest_field.set(dest, null);
          } else if (dest_field.getType().isArray()
              && orig_field.getType().isArray()
              && (dest_field.getType().getComponentType()
                  != orig_field.getType().getComponentType())) {
            //
            // Assigning an array to another array.
            //
            // You can't use reflection to set an int[] with an Integer[].  Argh.
            // TODO: other types of arrays. . .
            if (dest_field.getType().getComponentType() == double.class
                && orig_field.getType().getComponentType() == Double.class) {
              //
              // Assigning a Double[] to a double[]: unbox element-wise, since
              // a Double[] cannot simply be cast to double[].
              //
              Double[] from = (Double[]) orig_field.get(origin);
              double[] copy = new double[from.length];
              for (int c = 0; c < from.length; c++) copy[c] = from[c];
              dest_field.set(dest, copy);
            } else if (dest_field.getType().getComponentType() == Double.class
                && orig_field.getType().getComponentType() == double.class) {
              //
              // Assigning a double[] to a Double[]: box element-wise.
              //
              double[] from = (double[]) orig_field.get(origin);
              Double[] copy = new Double[from.length];
              for (int c = 0; c < from.length; c++) copy[c] = from[c];
              dest_field.set(dest, copy);
            } else if (dest_field.getType().getComponentType() == int.class
                && orig_field.getType().getComponentType() == Integer.class) {
              //
              // Assigning an Integer[] to an int[]: unbox element-wise.
              //
              Integer[] from = (Integer[]) orig_field.get(origin);
              int[] copy = new int[from.length];
              for (int c = 0; c < from.length; c++) copy[c] = from[c];
              dest_field.set(dest, copy);
            } else if (dest_field.getType().getComponentType() == Integer.class
                && orig_field.getType().getComponentType() == int.class) {
              //
              // Assigning an int[] to an Integer[]: box element-wise.
              //
              int[] from = (int[]) orig_field.get(origin);
              Integer[] copy = new Integer[from.length];
              for (int c = 0; c < from.length; c++) copy[c] = from[c];
              dest_field.set(dest, copy);
            } else if (Schema.class.isAssignableFrom(dest_field.getType().getComponentType())
                && (Schema.getImplClass(
                        (Class<? extends Schema>) dest_field.getType().getComponentType()))
                    .isAssignableFrom(orig_field.getType().getComponentType())) {
              //
              // Assigning an array of impl fields to an array of schema fields, e.g. a
              // DeepLearningParameters[] into a DeepLearningParametersV2[]
              //
              Class dest_component_class = dest_field.getType().getComponentType();
              Schema[] translation =
                  (Schema[])
                      Array.newInstance(
                          dest_component_class, Array.getLength(orig_field.get(origin)));
              int i = 0;
              int version = ((Schema) dest).getSchemaVersion();

              // Look up the schema for each element of the array; if not found fall back to the
              // schema for the base class.
              for (Iced impl : ((Iced[]) orig_field.get(origin))) {
                if (null == impl) {
                  translation[i++] = null;
                } else {
                  Schema s = null;
                  try {
                    s = Schema.schema(version, impl);
                  } catch (H2ONotFoundArgumentException e) {
                    s = ((Schema) dest_field.getType().getComponentType().newInstance());
                  }
                  translation[i++] = s.fillFromImpl(impl);
                }
              }
              dest_field.set(dest, translation);
            } else if (Schema.class.isAssignableFrom(orig_field.getType().getComponentType())
                && Iced.class.isAssignableFrom(dest_field.getType().getComponentType())) {
              //
              // Assigning an array of schema fields to an array of impl fields, e.g. a
              // DeepLearningParametersV2[] into a DeepLearningParameters[]
              //
              // We can't check against the actual impl class I, because we can't instantiate the
              // schema base classes to get the impl class from an instance:
              // dest_field.getType().getComponentType().isAssignableFrom(((Schema)f.getType().getComponentType().newInstance()).getImplClass())) {
              Class dest_component_class = dest_field.getType().getComponentType();
              Iced[] translation =
                  (Iced[])
                      Array.newInstance(
                          dest_component_class, Array.getLength(orig_field.get(origin)));
              int i = 0;
              for (Schema s : ((Schema[]) orig_field.get(origin))) {
                translation[i++] = s.createImpl();
              }
              dest_field.set(dest, translation);
            } else {
              throw H2O.fail(
                  "Don't know how to cast an array of: "
                      + orig_field.getType().getComponentType()
                      + " to an array of: "
                      + dest_field.getType().getComponentType());
            }
            // end of array handling
          } else if (dest_field.getType() == Key.class
              && Keyed.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a Keyed (e.g., a Frame or Model) to a Key.
            //
            dest_field.set(dest, ((Keyed) orig_field.get(origin))._key);
          } else if (orig_field.getType() == Key.class
              && Keyed.class.isAssignableFrom(dest_field.getType())) {
            //
            // Assigning a Key (for e.g., a Frame or Model) to a Keyed (e.g., a Frame or Model).
            //
            Value v = DKV.get((Key) orig_field.get(origin));
            dest_field.set(dest, (null == v ? null : v.get()));
          } else if (KeyV3.class.isAssignableFrom(dest_field.getType())
              && Keyed.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a Keyed (e.g., a Frame or Model) to a KeyV1.
            //
            dest_field.set(
                dest,
                KeyV3.make(
                    ((Class<? extends KeyV3>) dest_field.getType()),
                    ((Keyed) orig_field.get(origin))._key));
          } else if (KeyV3.class.isAssignableFrom(orig_field.getType())
              && Keyed.class.isAssignableFrom(dest_field.getType())) {
            //
            // Assigning a KeyV1 (for e.g., a Frame or Model) to a Keyed (e.g., a Frame or Model).
            //
            KeyV3 k = (KeyV3) orig_field.get(origin);
            Value v = DKV.get(Key.make(k.name));
            dest_field.set(dest, (null == v ? null : v.get()));
          } else if (KeyV3.class.isAssignableFrom(dest_field.getType())
              && Key.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a Key to a KeyV1.
            //
            dest_field.set(
                dest,
                KeyV3.make(
                    ((Class<? extends KeyV3>) dest_field.getType()), (Key) orig_field.get(origin)));
          } else if (KeyV3.class.isAssignableFrom(orig_field.getType())
              && Key.class.isAssignableFrom(dest_field.getType())) {
            //
            // Assigning a KeyV1 to a Key.
            //
            KeyV3 k = (KeyV3) orig_field.get(origin);
            dest_field.set(dest, (null == k.name ? null : Key.make(k.name)));
          } else if (dest_field.getType() == Pattern.class
              && String.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a String to a Pattern.
            //
            dest_field.set(dest, Pattern.compile((String) orig_field.get(origin)));
          } else if (orig_field.getType() == Pattern.class
              && String.class.isAssignableFrom(dest_field.getType())) {
            //
            // We are assigning a Pattern to a String.
            //
            dest_field.set(dest, orig_field.get(origin).toString());
          } else if (dest_field.getType() == FrameV3.ColSpecifierV3.class
              && String.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a String to a ColSpecifier.  Note that we currently support only the
            // colname, not a frame name too.
            //
            dest_field.set(dest, new FrameV3.ColSpecifierV3((String) orig_field.get(origin)));
          } else if (orig_field.getType() == FrameV3.ColSpecifierV3.class
              && String.class.isAssignableFrom(dest_field.getType())) {
            //
            // We are assigning a ColSpecifierV2 to a String.  The column_name gets copied.
            //
            dest_field.set(dest, ((FrameV3.ColSpecifierV3) orig_field.get(origin)).column_name);
          } else if (Enum.class.isAssignableFrom(dest_field.getType())
              && String.class.isAssignableFrom(orig_field.getType())) {
            //
            // Assigning a String into an enum field.
            //
            Class<Enum> dest_class = (Class<Enum>) dest_field.getType();
            dest_field.set(dest, Enum.valueOf(dest_class, (String) orig_field.get(origin)));
          } else if (Enum.class.isAssignableFrom(orig_field.getType())
              && String.class.isAssignableFrom(dest_field.getType())) {
            //
            // Assigning an enum field into a String.
            //
            Object o = orig_field.get(origin);
            dest_field.set(dest, (o == null ? null : o.toString()));
          } else if (Schema.class.isAssignableFrom(dest_field.getType())
              && Schema.getImplClass((Class<? extends Schema>) dest_field.getType())
                  .isAssignableFrom(orig_field.getType())) {
            //
            // Assigning an impl field into a schema field, e.g. a DeepLearningParameters into a
            // DeepLearningParametersV2.
            //
            dest_field.set(
                dest,
                Schema.schema(
                        /* ((Schema)dest).getSchemaVersion() TODO: remove HACK!! */ 3,
                        (Class<? extends Iced>) orig_field.get(origin).getClass())
                    .fillFromImpl((Iced) orig_field.get(origin)));
          } else if (Schema.class.isAssignableFrom(orig_field.getType())
              && Schema.getImplClass((Class<? extends Schema>) orig_field.getType())
                  .isAssignableFrom(dest_field.getType())) {
            //
            // Assigning a schema field into an impl field, e.g. a DeepLearningParametersV2 into a
            // DeepLearningParameters.
            //
            Schema s = ((Schema) orig_field.get(origin));
            dest_field.set(dest, s.fillImpl(s.createImpl()));
          } else if ((Schema.class.isAssignableFrom(dest_field.getType())
              && Key.class.isAssignableFrom(orig_field.getType()))) {
            //
            // Assigning an impl field fetched via a Key into a schema field, e.g. a
            // DeepLearningParameters into a DeepLearningParametersV2.
            // Note that unlike the cases above we don't know the type of the impl class until we
            // fetch in the body of the if.
            //
            Key origin_key = (Key) orig_field.get(origin);
            Value v = DKV.get(origin_key);
            if (null == v || null == v.get()) {
              dest_field.set(dest, null);
            } else {
              if (((Schema) dest_field.get(dest))
                  .getImplClass()
                  .isAssignableFrom(v.get().getClass())) {
                Schema s = ((Schema) dest_field.get(dest));
                dest_field.set(
                    dest,
                    Schema.schema(s.getSchemaVersion(), s.getImplClass()).fillFromImpl(v.get()));
              } else {
                Log.err(
                    "Can't fill Schema of type: "
                        + dest_field.getType()
                        + " with value of type: "
                        + v.getClass()
                        + " fetched from Key: "
                        + origin_key);
                dest_field.set(dest, null);
              }
            }
          } else if (Schema.class.isAssignableFrom(orig_field.getType())
              && Keyed.class.isAssignableFrom(dest_field.getType())) {
            //
            // Assigning a schema field into a Key field, e.g. a DeepLearningV2 into a
            // (DeepLearningParameters) key.
            //
            Schema s = ((Schema) orig_field.get(origin));
            dest_field.set(dest, ((Keyed) s.fillImpl(s.createImpl()))._key);
          } else {
            //
            // Normal case: not doing any type conversion.
            //
            dest_field.set(dest, orig_field.get(origin));
          }
        }
      } catch (IllegalAccessException e) {
        Log.err(
            "Illegal access exception trying to copy field: "
                + origin_name
                + " of class: "
                + origin.getClass()
                + " to field: "
                + dest_name
                + " of class: "
                + dest.getClass());
      } catch (InstantiationException e) {
        Log.err(
            "Instantiation exception trying to copy field: "
                + origin_name
                + " of class: "
                + origin.getClass()
                + " to field: "
                + dest_name
                + " of class: "
                + dest.getClass());
      }
    }
  }
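
A minimal usage sketch, assuming an origin schema object whose fields lack underscores and a destination impl object whose fields carry leading underscores (the objects and the "ignored" field name are hypothetical):

    PojoUtils.copyProperties(
        implObj, schemaObj, FieldNaming.DEST_HAS_UNDERSCORES,
        new String[] {"ignored"}, null);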
Code Example #15
File: KMeans.java Project: huamichaelchen/h2o-3
 // Pick most common cat level for each cluster_centers' cat columns
 private static double[][] max_cats(double[][] centers, long[][][] cats, String[][] isCats) {
   for (int clu = 0; clu < centers.length; clu++)
     for (int col = 0; col < centers[0].length; col++)
       if (isCats[col] != null) centers[clu][col] = ArrayUtils.maxIndex(cats[clu][col]);
   return centers;
 }
Code Example #16
File: KMeans.java Project: huamichaelchen/h2o-3
 @Override
 public void reduce(Sampler other) {
   _sampled = ArrayUtils.append(_sampled, other._sampled);
 }
Code Example #17
File: GLRMModel.java Project: vijaykiran/h2o-3
    // public final double[] rproxgrad_x(double[] u, double alpha) { return rproxgrad(u, alpha,
    // _gamma_x, _regularization_x, RandomUtils.getRNG(_seed)); }
    // public final double[] rproxgrad_y(double[] u, double alpha) { return rproxgrad(u, alpha,
    // _gamma_y, _regularization_y, RandomUtils.getRNG(_seed)); }
    static double[] rproxgrad(
        double[] u, double alpha, double gamma, Regularizer regularization, Random rand) {
      if (u == null || alpha == 0 || gamma == 0) return u;
      double[] v = new double[u.length];

      switch (regularization) {
        case None:
          return u;
        case Quadratic:
          for (int i = 0; i < u.length; i++) v[i] = u[i] / (1 + 2 * alpha * gamma);
          return v;
        case L2:
          // Proof uses Moreau decomposition; see section 6.5.1 of Parikh and Boyd
          // https://web.stanford.edu/~boyd/papers/pdf/prox_algs.pdf
          double weight = 1 - alpha * gamma / ArrayUtils.l2norm(u);
          if (weight < 0) return v; // Zero vector
          for (int i = 0; i < u.length; i++) v[i] = weight * u[i];
          return v;
        case L1:
          for (int i = 0; i < u.length; i++)
            v[i] = Math.max(u[i] - alpha * gamma, 0) + Math.min(u[i] + alpha * gamma, 0);
          return v;
        case NonNegative:
          for (int i = 0; i < u.length; i++) v[i] = Math.max(u[i], 0);
          return v;
        case OneSparse:
          int idx = ArrayUtils.maxIndex(u, rand);
          v[idx] = u[idx] > 0 ? u[idx] : 1e-6;
          return v;
        case UnitOneSparse:
          idx = ArrayUtils.maxIndex(u, rand);
          v[idx] = 1;
          return v;
        case Simplex:
          // Proximal gradient algorithm by Chen and Ye in http://arxiv.org/pdf/1101.6081v2.pdf
          // 1) Sort input vector u in ascending order: u[1] <= ... <= u[n]
          int n = u.length;
          int[] idxs = new int[n];
          for (int i = 0; i < n; i++) idxs[i] = i;
          ArrayUtils.sort(idxs, u);

          // 2) Calculate cumulative sum of u in descending order
          // cumsum(u) = (..., u[n-2]+u[n-1]+u[n], u[n-1]+u[n], u[n])
          double[] ucsum = new double[n];
          ucsum[n - 1] = u[idxs[n - 1]];
          for (int i = n - 2; i >= 0; i--) ucsum[i] = ucsum[i + 1] + u[idxs[i]];

          // 3) Let t_i = (\sum_{j=i+1}^n u[j] - 1)/(n - i)
          // For i = n-1,...,1, set optimal t* to first t_i >= u[i]
          double t = (ucsum[0] - 1) / n; // Default t* = (\sum_{j=1}^n u[j] - 1)/n
          for (int i = n - 1; i >= 1; i--) {
            double tmp = (ucsum[i] - 1) / (n - i);
            if (tmp >= u[idxs[i - 1]]) {
              t = tmp;
              break;
            }
          }

          // 4) Return max(u - t*, 0) as projection of u onto simplex
          double[] x = new double[u.length];
          for (int i = 0; i < u.length; i++) x[i] = Math.max(u[i] - t, 0);
          return x;
        default:
          throw new RuntimeException("Unknown regularization function " + regularization);
      }
    }
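
Two of the branches above are standard proximal operators. The L1 case is soft-thresholding, and the clamped-sum form used in the code is equivalent to the usual sign-magnitude form:

\[ \max(u_i - \alpha\gamma, 0) + \min(u_i + \alpha\gamma, 0) = \operatorname{sign}(u_i)\,\max(|u_i| - \alpha\gamma, 0) \]

Similarly, the L2 case scales u toward the origin and returns the zero vector once \alpha\gamma exceeds \|u\|_2.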
Code Example #18
File: KMeans.java Project: huamichaelchen/h2o-3
    // Initialize cluster centers
    double[][] initial_centers(
        KMeansModel model,
        final Vec[] vecs,
        final double[] means,
        final double[] mults,
        final int[] modes) {

      // Categoricals use a different distance metric than numeric columns.
      model._output._categorical_column_count = 0;
      _isCats = new String[vecs.length][];
      for (int v = 0; v < vecs.length; v++) {
        _isCats[v] = vecs[v].isCategorical() ? new String[0] : null;
        if (_isCats[v] != null) model._output._categorical_column_count++;
      }

      Random rand = water.util.RandomUtils.getRNG(_parms._seed - 1);
      double[][] centers; // Cluster centers
      if (null != _parms._user_points) { // User-specified starting points
        Frame user_points = _parms._user_points.get();
        int numCenters = (int) user_points.numRows();
        int numCols = model._output.nfeatures();
        centers = new double[numCenters][numCols];
        Vec[] centersVecs = user_points.vecs();
        // Get the centers and standardize them if requested
        for (int r = 0; r < numCenters; r++) {
          for (int c = 0; c < numCols; c++) {
            centers[r][c] = centersVecs[c].at(r);
            centers[r][c] = data(centers[r][c], c, means, mults, modes);
          }
        }
      } else { // Random, Furthest, or PlusPlus initialization
        if (_parms._init == Initialization.Random) {
          // Initialize all cluster centers to random rows
          centers = new double[_parms._k][model._output.nfeatures()];
          for (double[] center : centers) randomRow(vecs, rand, center, means, mults, modes);
        } else {
          centers = new double[1][model._output.nfeatures()];
          // Initialize first cluster center to random row
          randomRow(vecs, rand, centers[0], means, mults, modes);

          model._output._iterations = 0;
          while (model._output._iterations < 5) {
            // Sum squares distances to cluster center
            SumSqr sqr = new SumSqr(centers, means, mults, modes, _isCats).doAll(vecs);

            // Sample with probability inverse to square distance
            Sampler sampler =
                new Sampler(
                        centers,
                        means,
                        mults,
                        modes,
                        _isCats,
                        sqr._sqr,
                        _parms._k * 3,
                        _parms._seed,
                        hasWeightCol())
                    .doAll(vecs);
            centers = ArrayUtils.append(centers, sampler._sampled);

            // Fill in sample centers into the model
            if (!isRunning()) return null; // Stopped/cancelled
            model._output._centers_raw = destandardize(centers, _isCats, means, mults);
            model._output._tot_withinss = sqr._sqr / _train.numRows();

            model._output._iterations++; // One iteration done

            // Make an early version of the model visible, but don't update progress via update(1)
            model.update(_key);
          }
          // Recluster down to k cluster centers
          centers = recluster(centers, rand, _parms._k, _parms._init, _isCats);
          model._output._iterations = 0; // Reset iteration count
        }
      }
      return centers;
    }