Example #1
 public double[] pValues() {
   double[] res = zValues();
   // Use a t-distribution when the dispersion parameter was estimated, otherwise a standard normal.
   RealDistribution rd =
       _dispersionEstimated
           ? new TDistribution(_training_metrics.residualDegreesOfFreedom())
           : new NormalDistribution();
   // Two-sided p-value for each statistic: 2 * P(X <= -|z|).
   for (int i = 0; i < res.length; ++i) res[i] = 2 * rd.cumulativeProbability(-Math.abs(res[i]));
   return res;
 }
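
The method above converts z-scores into two-sided p-values. As a minimal standalone sketch of the same calculation for a single statistic, assuming only Apache Commons Math 3 and a hypothetical z-score value:

 import org.apache.commons.math3.distribution.NormalDistribution;

 public class PValueSketch {
   public static void main(String[] args) {
     double z = 1.96; // hypothetical z-score
     NormalDistribution std = new NormalDistribution(); // standard normal: mean 0, sd 1
     // Two-sided p-value: probability of a statistic at least as extreme as |z|.
     double p = 2 * std.cumulativeProbability(-Math.abs(z));
     System.out.println(p); // roughly 0.05 for z = 1.96
   }
 }
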
  public static void main(String[] args) {
    TextParser parser = new TextParser();

    // Parse a discrete uniform distribution from its text specification and draw 100 samples.
    IntegerDistribution id =
        (IntegerDistribution) parser.parseText("uniform()", TextParser.INTEGER);
    for (int i = 1; i < 101; i++) {
      System.out.print(", " + id.sample());
    }
    System.out.print("\n");
    // Parse an exponential distribution and draw 100 samples.
    RealDistribution rd = (RealDistribution) parser.parseText("exponential(4,0)", TextParser.REAL);
    for (int i = 1; i < 101; i++) {
      System.out.print(", " + rd.sample());
    }
  }
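
TextParser here is a project-specific helper that turns a text specification into a distribution object. If only Apache Commons Math 3 is available, roughly equivalent sampling can be done by constructing the distributions directly; a minimal sketch, where the parameter choices are illustrative assumptions rather than what the parser above produces:

  import org.apache.commons.math3.distribution.ExponentialDistribution;
  import org.apache.commons.math3.distribution.IntegerDistribution;
  import org.apache.commons.math3.distribution.RealDistribution;
  import org.apache.commons.math3.distribution.UniformIntegerDistribution;

  public class DirectSamplingSketch {
    public static void main(String[] args) {
      // Discrete uniform over 0..9 (bounds are illustrative).
      IntegerDistribution id = new UniformIntegerDistribution(0, 9);
      for (int i = 0; i < 100; i++) {
        System.out.print(", " + id.sample());
      }
      System.out.print("\n");
      // Exponential distribution with mean 4.
      RealDistribution rd = new ExponentialDistribution(4);
      for (int i = 0; i < 100; i++) {
        System.out.print(", " + rd.sample());
      }
    }
  }
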
  /**
   * Initialize weights. This includes the random initialization of the weight matrix W as well as
   * the visible bias (vBias) and hidden bias (hBias).
   */
  protected void initWeights() {

    if (this.nVisible < 1)
      throw new IllegalStateException("Number of visible units cannot be less than 1");
    if (this.nHidden < 1)
      throw new IllegalStateException("Number of hidden units cannot be less than 1");

    if (this.dist == null)
      dist =
          new NormalDistribution(rng, 0, .01, NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
    /*
     * Initialize based on the number of visible units.
     * The lower bound is called the fan in;
     * the upper bound is called the fan out.
     *
     * The advice below also works for denoising autoencoders and other
     * neural networks, since the same baseline guiding principles apply to
     * both RBMs and denoising autoencoders.
     *
     * Hinton's practical guide to training RBMs:
     * The weights are typically initialized to small random values chosen from a zero-mean Gaussian with
     * a standard deviation of about 0.01. Using larger random values can speed the initial learning, but
     * it may lead to a slightly worse final model. Care should be taken to ensure that the initial weight
     * values do not allow typical visible vectors to drive the hidden unit probabilities very close to 1 or 0,
     * as this significantly slows the learning.
     */
    if (this.W == null) {

      this.W = DoubleMatrix.zeros(nVisible, nHidden);

      for (int i = 0; i < this.W.rows; i++)
        this.W.putRow(i, new DoubleMatrix(dist.sample(this.W.columns)));
    }

    this.wAdaGrad = new AdaGrad(this.W.rows, this.W.columns);

    if (this.hBias == null) {
      this.hBias = DoubleMatrix.zeros(nHidden);
      /*
       * Encourage sparsity.
       * See Hinton's Practical guide to RBMs
       */
      // this.hBias.subi(4);
    }

    this.hBiasAdaGrad = new AdaGrad(hBias.rows, hBias.columns);

    if (this.vBias == null) {
      // The visible bias is zero-initialized whether or not input is present.
      this.vBias = DoubleMatrix.zeros(nVisible);
    }

    this.vBiasAdaGrad = new AdaGrad(vBias.rows, vBias.columns);
  }
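
The comment inside initWeights() summarizes Hinton's recommendation of drawing initial weights from a zero-mean Gaussian with a standard deviation of about 0.01. A minimal standalone sketch of that initialization with Commons Math, where the layer sizes and the plain double[][] representation are illustrative assumptions (the original uses jblas DoubleMatrix):

  import org.apache.commons.math3.distribution.NormalDistribution;

  public class GaussianWeightInitSketch {
    public static void main(String[] args) {
      int nVisible = 784; // illustrative layer sizes
      int nHidden = 500;
      // Zero-mean Gaussian with standard deviation 0.01, per Hinton's guide.
      NormalDistribution dist = new NormalDistribution(0, 0.01);
      double[][] W = new double[nVisible][nHidden];
      for (int i = 0; i < nVisible; i++) {
        // Sample one row of weights at a time.
        W[i] = dist.sample(nHidden);
      }
      System.out.println("W[0][0] = " + W[0][0]);
    }
  }
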
 private int getOccupationTime() {
   // Draw a parking duration and clamp it to the range [0, longestParkingTime].
   double sample = realDistribution.sample();
   if (sample > longestParkingTime) {
     sample = longestParkingTime;
   }
   if (sample < 0) {
     sample = 0;
   }
   // Add twice the slot's distance to the sampled duration.
   return (int) sample + parkingSlotProvider.getObject(parkingSlot).getDistance() * 2;
 }
  public INDArray genGaussianNoise(int id) {
    if (!currentNoise.containsKey(id)) {
      // First request for this id: sample a fresh zero-mean Gaussian noise matrix and cache it.
      INDArray zeroMean = Nd4j.zeros(inputSize, outputSize);

      currentNoise.put(id, Sampling.normal(RandomUtils.getRandomGenerator(id), zeroMean, noiseVar));
    } else {
      // Subsequent requests: refresh the cached matrix by shifting in a single new Gaussian draw.
      RealDistribution reals =
          new NormalDistribution(
              RandomUtils.getRandomGenerator(id),
              0,
              noiseVarSqrt,
              NormalDistribution.DEFAULT_INVERSE_ABSOLUTE_ACCURACY);
      INDArrayUtils.shiftLeft(
          currentNoise.get(id),
          inputSize,
          outputSize,
          RandomUtils.getRandomGenerator(id).nextInt(inputSize * outputSize),
          reals.sample());
    }

    return currentNoise.get(id);
  }
Example #6
 public double nextDistributedDouble() {
   // Inverse transform sampling: map a uniform draw through the normal quantile function.
   return normal.inverseCumulativeProbability(random.nextDouble());
 }
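
This is inverse transform sampling: feeding a Uniform(0,1) draw through a distribution's quantile function yields a sample from that distribution. A minimal standalone sketch with Commons Math, where the standard-normal parameters and the fixed seed are illustrative assumptions:

 import java.util.Random;
 import org.apache.commons.math3.distribution.NormalDistribution;

 public class InverseTransformSketch {
   public static void main(String[] args) {
     Random random = new Random(42);
     NormalDistribution normal = new NormalDistribution(0, 1);
     for (int i = 0; i < 5; i++) {
       // Equivalent in distribution to normal.sample(), but driven by our own RNG.
       double x = normal.inverseCumulativeProbability(random.nextDouble());
       System.out.println(x);
     }
   }
 }
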