private void getnegphase() {
    /*
     * Performs the negative phase of the unsupervised RBM training algorithm.
     *
     * For details, please refer to Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, no. 5786, pp. 504-507, 28 July 2006.
     */

    // start the negative phase calculation
    // compute the sigmoid activations of v1 and h1
    // first, reconstruct the visible vector v1
    Matrix negdata = poshidstates.times(vishid.transpose());
    // (1 * numhid) * (numhid * numdims) = (1 * numdims)
    negdata.plusEquals(visbiases);
    // poshidstates*vishid' + visbiases
    double[][] tmp1 = negdata.getArray();
    for (int i1 = 0; i1 < numdims; i1++) {
      tmp1[0][i1] = 1 / (1 + Math.exp(-tmp1[0][i1]));
    }

    // find the vector of h1
    neghidprobs = negdata.times(vishid);
    // (1 * numdims) * (numdims * numhid) = (1 * numhid)
    neghidprobs.plusEquals(hidbiases);
    double[][] tmp2 = neghidprobs.getArray();
    for (int i2 = 0; i2 < numhid; i2++) {
      tmp2[0][i2] = 1 / (1 + Math.exp(-tmp2[0][i2]));
    }
    negprods = negdata.transpose().times(neghidprobs);
    // (numdims * 1) *(1 * numhid) = (numdims * numhid)
  }
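  The update() step invoked from map() below is not part of this listing. As a hedged sketch only, a typical CD-1 weight update built from the positive and negative statistics computed above (posprods, negprods) might look like the method below; the name updateSketch and the learning-rate value epsilonw are assumptions for illustration, not taken from the original source.

  private void updateSketch() {
    /*
     * Sketch of a CD-1 weight update: delta_w = epsilon * (<v0 h0> - <v1 h1>),
     * i.e. (posprods - negprods) scaled by an assumed learning rate.
     */
    double epsilonw = 0.1; // assumed learning rate, not from the original code
    vishidinc = posprods.minus(negprods).times(epsilonw);
  }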
  public void map(
      LongWritable key,
      Text value,
      OutputCollector<IntWritable, DoubleWritable> output,
      Reporter reporter)
      throws IOException {
    /*
     * Implements the mapper. It emits each weight's index together with its updated value.
     *
     * Note that the format of the intermediate output is <IntWritable, DoubleWritable>,
     * because the key is the weight's index (an integer) and the value is the weight's update (a double).
     */
    inputData = value.toString();

    // go through the process
    initialize();
    getposphase();
    getnegphase();
    update();

    // output the intermediate data
    // The <key, value> pairs are <weightID, weightUpdate>
    double[][] vishidinc_array = vishidinc.getArray();
    for (int i = 0; i < numdims; i++) {
      for (int j = 0; j < numhid; j++) {
        weightPos.set(i * numhid + j);
        weightValue.set(vishidinc_array[i][j]);
        output.collect(weightPos, weightValue);
      }
    }
  }
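  For context, the <weightID, weightUpdate> pairs emitted by this mapper would typically be combined in a reducer. The class below is only an illustrative sketch, assuming the per-weight updates are simply averaged across mappers; the class name is hypothetical, it is not part of the original listing, and it assumes java.util.Iterator plus the org.apache.hadoop.mapred classes already used by the mapper are imported.

  public static class WeightAverageReducer extends MapReduceBase
      implements Reducer<IntWritable, DoubleWritable, IntWritable, DoubleWritable> {
    public void reduce(
        IntWritable key,
        Iterator<DoubleWritable> values,
        OutputCollector<IntWritable, DoubleWritable> output,
        Reporter reporter)
        throws IOException {
      double sum = 0;
      int count = 0;
      while (values.hasNext()) {
        sum += values.next().get();
        count++;
      }
      // emit the averaged update for this weight index
      output.collect(key, new DoubleWritable(sum / count));
    }
  }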
  public double[][] Eigenvectors(Matrix m) {
    EigenvalueDecomposition decomposition = m.eig();
    Matrix eigenVectorsMatrix = decomposition.getV();
    double[][] eigenvectors = eigenVectorsMatrix.getArray();

    // System.out.println("eigenvectors matrix");
    // eigenVectorsMatrix.print(2, 2);

    return eigenvectors;
  }
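  As a usage illustration only (not from the original source), the helper below applies Eigenvectors() to a small symmetric matrix; the method name eigenExample and the sample values are assumptions. JAMA's getV() returns the eigenvectors as the columns of V, and getRealEigenvalues() gives the matching eigenvalues.

  public void eigenExample() {
    // Illustrative only: eigen-decomposition of a 2x2 symmetric matrix.
    double[][] a = { { 2.0, 1.0 }, { 1.0, 2.0 } };
    Matrix m = new Matrix(a);
    double[][] vecs = Eigenvectors(m); // columns are the eigenvectors
    double[] vals = m.eig().getRealEigenvalues(); // eigenvalues, here 1.0 and 3.0
    System.out.println("first eigenvector: (" + vecs[0][0] + ", " + vecs[1][0] + ")");
    System.out.println("eigenvalues: " + vals[0] + ", " + vals[1]);
  }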
  private void getposphase() {
    /*
     * Performs the positive phase of the unsupervised RBM training algorithm.
     *
     * For details, please refer to Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, no. 5786, pp. 504-507, 28 July 2006.
     */

    // Start the positive phase calculation
    // compute the sigmoid activation probabilities of h0
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();
    for (int i2 = 0; i2 < numhid; i2++) {
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
    }
    posprods = data.transpose().times(poshidprobs);
    // (numdims * 1) * (1 * numhid)

    // end of the positive phase calculation; find the binary representation of h0
    double[][] tmp1 = poshidprobs.getArray();
    double[][] tmp2 = new double[1][numhid];
    Random randomgenerator = new Random();
    for (int i3 = 0; i3 < numhid; i3++) {
      /*
       * sample each hidden unit as a binary value with the probability given by poshidprobs
       */
      if (tmp1[0][i3] > randomgenerator.nextDouble()) tmp2[0][i3] = 1;
      else tmp2[0][i3] = 0;
    }

    // poshidstates is a binary sample drawn according to the probabilities in poshidprobs
    poshidstates = new Matrix(tmp2);
  }
  private void prop2nextLayer() {
    /*
     * Performs forward propagation of the current input to the next layer.
     */
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();

    for (int i2 = 0; i2 < numhid; i2++) {
      /*
       * compute the updated input and write it to newinput (scaled to the 0-255 range)
       */
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
      newinput[i2] = (int) (product_tmp2[0][i2] * 255.0);
    }
  }
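  The same element-wise logistic transform appears in getposphase(), getnegphase() and prop2nextLayer(). A small helper like the one below (hypothetical, not part of the original class) captures it in one place.

  // Hypothetical helper: the logistic sigmoid 1 / (1 + e^(-x)) applied element-wise above.
  private static double sigmoid(double x) {
    return 1.0 / (1.0 + Math.exp(-x));
  }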
Example n. 6
File: Main.java  Project: YpGu/gcoev
  /** toy example */
  public static void test2() {
    int N = 500;
    double[][] m1 = new double[N][N];
    double[][] m2 = new double[N][N];
    double[][] m3 = new double[N][N];

    // init
    Random rand = new Random();
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++) {
        m1[i][j] = 10 * (rand.nextDouble() - 0.2);
        m2[i][j] = 20 * (rand.nextDouble() - 0.8);
      }

    // inverse
    System.out.println("Start");
    Matrix mat1 = new Matrix(m1);
    Matrix mat2 = mat1.inverse();
    Matrix mat3 = mat1.times(mat2);
    double[][] m4 = mat3.getArray();
    /*
    for (int i = 0; i < m4.length; i++) {
      int ss = 10;
      for (int j = 0; j < ss; j++) {
        System.out.printf("%f ", m4[i][j]);
      }
      System.out.print("\n");
    }
    */
    System.out.println("Done");

    /*
    // matrix multiplication
    System.out.println("Start");
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++) {
        double cell = 0;
        for (int k = 0; k < N; k++)
          cell += m1[i][k] * m2[k][j];
        // System.out.printf("%f ", cell);
        m3[i][j] = cell;
      }
    System.out.println("Done");
    */
  }