private void getnegphase() {
    /*
     * Performs the negative phase of the unsupervised RBM training algorithm.
     *
     * For details, please refer to Dr. Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, No. 5786, pp. 504-507, 28 July 2006.
     */

    // start calculating the negative phase
    // compute the logistic (sigmoid) values of v1 and h1
    // find the vector v1
    Matrix negdata = poshidstates.times(vishid.transpose());
    // (1 * numhid) * (numhid * numdims) = (1 * numdims)
    negdata.plusEquals(visbiases);
    // poshidstates*vishid' + visbiases
    double[][] tmp1 = negdata.getArray();
    for (int i1 = 0; i1 < numdims; i1++) {
      tmp1[0][i1] = 1 / (1 + Math.exp(-tmp1[0][i1]));
    }

    // find the vector of h1
    neghidprobs = negdata.times(vishid);
    // (1 * numdims) * (numdims * numhid) = (1 * numhid)
    neghidprobs.plusEquals(hidbiases);
    double[][] tmp2 = neghidprobs.getArray();
    for (int i2 = 0; i2 < numhid; i2++) {
      tmp2[0][i2] = 1 / (1 + Math.exp(-tmp2[0][i2]));
    }
    negprods = negdata.transpose().times(neghidprobs);
    // (numdims * 1) *(1 * numhid) = (numdims * numhid)
  }
  private void prop2nextLayer() {
    /*
     * Performs forward propagation of the input through this RBM layer.
     */
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();

    for (int i2 = 0; i2 < numhid; i2++) {
      /*
       * compute the updated input and write it to newinput
       */
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
      newinput[i2] = (int) (product_tmp2[0][i2] * 255.0);
    }
  }
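The element-wise logistic transform above is written out by hand in getnegphase, prop2nextLayer, and getposphase. A minimal helper that factors it out, assuming the JAMA Matrix class these snippets appear to use (import Jama.Matrix); the name applySigmoid is hypothetical and not part of the original code:

  // Sketch only: applies 1 / (1 + e^(-x)) to every entry of m, in place.
  // getArray() returns the backing array, so writes go straight into the Matrix.
  private static void applySigmoid(Matrix m) {
    double[][] a = m.getArray();
    for (int r = 0; r < m.getRowDimension(); r++) {
      for (int c = 0; c < m.getColumnDimension(); c++) {
        a[r][c] = 1.0 / (1.0 + Math.exp(-a[r][c]));
      }
    }
  }

With such a helper, each phase reduces to a matrix product, a bias addition, and one applySigmoid call.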
Example #3
File: Main.java (Project: YpGu/gcoev)
  /** toy example */
  public static void test2() {
    int N = 500;
    double[][] m1 = new double[N][N];
    double[][] m2 = new double[N][N];
    double[][] m3 = new double[N][N];

    // init
    Random rand = new Random();
    for (int i = 0; i < N; i++)
      for (int j = 0; j < N; j++) {
        m1[i][j] = 10 * (rand.nextDouble() - 0.2);
        m2[i][j] = 20 * (rand.nextDouble() - 0.8);
      }

    // inverse
    System.out.println("Start");
    Matrix mat1 = new Matrix(m1);
    Matrix mat2 = mat1.inverse();
    Matrix mat3 = mat1.times(mat2);
    double[][] m4 = mat3.getArray();
    /*
    for (int i = 0; i < m4.length; i++) {
      int ss = 10;
      for (int j = 0; j < ss; j++) {
        System.out.printf("%f ", m4[i][j]);
      }
      System.out.print("\n");
    }
    */
    System.out.println("Done");

    /*
    // matrix *
    System.out.println("Start");
    for (int i = 0; i < N; i++) for (int j = 0; j < N; j++) {
      double cell = 0;
      for (int k = 0; k < N; k++)
        cell += m1[i][k] * m2[k][j];
      // System.out.printf("%f ", cell);
      m3[i][j] = cell;
    }
    System.out.println("Done");
    */
  }
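The commented-out loop above only prints the first few columns of mat3. A more compact sanity check, assuming the same JAMA Matrix API (Matrix.identity and normInf are standard JAMA calls), would compare mat1.times(mat2) against the identity:

    // Sketch: the residual norm should be close to zero if the inverse is accurate.
    Matrix residual = mat3.minus(Matrix.identity(N, N));
    System.out.printf("||A * inv(A) - I||_inf = %e%n", residual.normInf());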
  private void getposphase() {
    /*
     * Performs the positive phase of the unsupervised RBM training algorithm.
     *
     * For details, please refer to Dr. Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, No. 5786, pp. 504-507, 28 July 2006.
     */

    // Start calculating the positive phase
    // compute the logistic (sigmoid) value of h0
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();
    for (int i2 = 0; i2 < numhid; i2++) {
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
    }
    posprods = data.transpose().times(poshidprobs);
    // (numdims * 1) * (1 * numhid)

    // end of the positive phase calculation; find the binary representation of h0
    double[][] tmp1 = poshidprobs.getArray();
    double[][] tmp2 = new double[1][numhid];
    Random randomgenerator = new Random();
    for (int i3 = 0; i3 < numhid; i3++) {
      /*
       * sample according to the probability given by poshidprobs
       */
      if (tmp1[0][i3] > randomgenerator.nextDouble()) tmp2[0][i3] = 1;
      else tmp2[0][i3] = 0;
    }

    // poshidstates is a binary sample drawn according to the probabilities given by poshidprobs
    poshidstates = new Matrix(tmp2);
  }
  // update the weights and biases
  // This serves as a reducer
  private void update() {
    /*
     * Computes the weight updates from the previously computed statistics and parameters.
     *
     * For details, please refer to Dr. Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, No. 5786, pp. 504-507, 28 July 2006.
     */
    double momentum;
    // if (epoch > 5)
    //        momentum = finalmomentum;
    // else
    //        momentum = initialmomentum;
    // vishidinc = momentum*vishidinc + epsilonw*( (posprods-negprods)/numcases -
    // weightcost*vishid);
    // vishidinc.timesEquals(momentum);
    Matrix temp1 = posprods.minus(negprods);
    Matrix temp2 = vishid.times(weightcost);
    temp1.minusEquals(temp2);
    temp1.timesEquals(epsilonw);

    // the final weight updates are accumulated in vishidinc
    vishidinc.plusEquals(temp1);
  }
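Taken together, getposphase, getnegphase, and update make up one CD-1 (contrastive divergence) step of the RBM training described in Hinton's paper. A hedged sketch of how a caller might chain them for the current input vector; the wrapper name trainOneCase is an assumption and does not appear in the original code:

  // Sketch: one CD-1 step over the fields used above (data, vishid, vishidinc, ...).
  private void trainOneCase() {
    getposphase(); // h0 probabilities, positive statistics posprods, sampled poshidstates
    getnegphase(); // reconstruction v1, h1 probabilities, negative statistics negprods
    update();      // accumulate epsilonw * (posprods - negprods - weightcost * vishid) into vishidinc
  }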
Example #6
File: Main.java (Project: YpGu/gcoev)
  /*
   * backward pass 1: update
   *  (1) \hat{mu} (mu_hat_s)
   *  (2) \hat{grad_mu} (grad_mu_hat_s)
   *  (3) \hat{V} (v_hat_s)
   */
  public static void backward1(boolean update_grad) {
    for (int t1 = T - 1; t1 > t0; t1--) {
      int t = t1 - t0;
      //      System.out.println("backward 1;\tt = " + t1);
      if (t != T - 1 - t0) {
        double V_pre_t = v_s.get(t - 1); // V^{t-1}
        double V_hat_t = v_hat_s.get(t); // \hat{V}^{t}
        double[][] mu_pre_t = mu_s.get(t - 1); // \mu^{t-1}
        double[][] mu_hat_t = mu_hat_s.get(t); // \hat{\mu}^{t}  [t-1]

        Matrix A_pre_t = new Matrix(AS.get(t - 1)); // A^{t-1}
        Matrix hprime_pre_t = new Matrix(h_prime_s.get(t - 1)); // h'^{t-1}
        Matrix ave_neighbors = A_pre_t.times(hprime_pre_t); // n * 1

        /* calculate \hat{\mu} at time t-1 */
        double factor_1 =
            (1 - lambda) * V_pre_t / (sigma * sigma + (1 - lambda) * (1 - lambda) * V_pre_t);
        double factor_2 = (sigma * sigma) / (sigma * sigma + (1 - lambda) * (1 - lambda) * V_pre_t);
        double[][] mu_hat_pre_t = new double[n][K];
        for (int i = 0; i < n; i++)
          for (int k = 0; k < K; k++) {
            mu_hat_pre_t[i][k] =
                factor_1 * (mu_hat_t[i][k] - lambda * ave_neighbors.get(i, k))
                    + factor_2 * mu_pre_t[i][k];
          }
        /* calculate \hat{V} at time t-1 */
        double V_hat_pre_t =
            V_pre_t
                + factor_1
                    * factor_1
                    * (V_hat_t - (1 - lambda) * (1 - lambda) * V_pre_t - (sigma * sigma));

        /* update \mu and V */
        mu_hat_s.set(t - 1, mu_hat_pre_t);
        v_hat_s.set(t - 1, V_hat_pre_t);

        /* calculate and update grad_mu_hat at time t-1 */
        if (update_grad)
          for (int s = 0; s < T - t0; s++) {
            double[][] grad_hat_t_s = grad_mu_hat_s.get(t * (T - t0) + s);
            double[][] grad_pre_t_s = grad_mu_s.get((t - 1) * (T - t0) + s);
            double[][] grad_hat_pre_t_s = new double[n][K];
            for (int i = 0; i < n; i++)
              for (int k = 0; k < K; k++) {
                grad_hat_pre_t_s[i][k] =
                    factor_1 * grad_hat_t_s[i][k] + factor_2 * grad_pre_t_s[i][k];
              }
            grad_mu_hat_s.set((t - 1) * (T - t0) + s, grad_hat_pre_t_s);
          }
      } else {
        /*
         * initial condition for backward pass:
         *  (1) \hat{mu}^{T} = mu^{T}
         *  (2) \hat{V}^{T} = V^{T}
         *  (3) \hat{grad_mu}^{T/s} = grad_mu^{T/s}, \forall s
         */
        mu_hat_s.set(t, mu_s.get(t));
        v_hat_s.set(t, v_s.get(t));
        if (update_grad)
          for (int s = 0; s < T - t0; s++) {
            grad_mu_hat_s.set(t * (T - t0) + s, grad_mu_s.get(t * (T - t0) + s));
          }
      }
      //      Scanner sc = new Scanner(System.in);
      //      int gu; gu = sc.nextInt();
      /* end for each t */
    }
  }
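For reference, the recursion in the branch above can be written compactly; this is a direct transcription of factor_1, factor_2, mu_hat_pre_t, V_hat_pre_t, and grad_hat_pre_t_s, not an independent derivation:

$$f_1 = \frac{(1-\lambda)\,V^{t-1}}{\sigma^2 + (1-\lambda)^2 V^{t-1}}, \qquad f_2 = \frac{\sigma^2}{\sigma^2 + (1-\lambda)^2 V^{t-1}}$$

$$\hat{\mu}^{t-1}_{ik} = f_1\bigl(\hat{\mu}^{t}_{ik} - \lambda\,(A^{t-1}h'^{t-1})_{ik}\bigr) + f_2\,\mu^{t-1}_{ik}, \qquad \hat{V}^{t-1} = V^{t-1} + f_1^2\bigl(\hat{V}^{t} - (1-\lambda)^2 V^{t-1} - \sigma^2\bigr)$$

and, when update_grad is set, $\widehat{\nabla\mu}^{\,t-1,s}_{ik} = f_1\,\widehat{\nabla\mu}^{\,t,s}_{ik} + f_2\,\nabla\mu^{\,t-1,s}_{ik}$ for every $s$.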
Example #7
File: Main.java (Project: YpGu/gcoev)
  /**
   * forward pass 1: update intrinsic features
   *  (1) mu (mu_s)
   *  (2) grad_mu (grad_mu_s)
   *  (3) variance V (v_s)
   */
  public static void forward1(boolean update_grad, int iter) {
    /*
    if (iter == 4) {
      int t = 15;
      double[][] h_t = new double[n][K];
      double[][] h_hat_t = new double[n][K];
      double[][] h_prime_t = new double[n][K];
      double[][] h_hat_prime_t = new double[n][K];
      for (int i = 0; i < n; i++) for (int k = 0; k < K; k++) {
        h_t[i][k] = h_s.get(t-1)[i][k];
        h_hat_t[i][k] = h_hat_s.get(t-1)[i][k];
        h_prime_t[i][k] = h_prime_s.get(t-1)[i][k];
        h_hat_prime_t[i][k] = h_hat_prime_s.get(t-1)[i][k];
      }
      h_s.set(t, h_t); h_hat_s.set(t, h_hat_t);
      h_prime_s.set(t, h_prime_t); h_hat_prime_s.set(t, h_hat_prime_t);
    }
    */

    for (int t = 0; t < T - t0; t++) {
      //      System.out.println("forward 1;\tt = " + t1);
      if (t != 0) {
        double delta_t = delta_s.get(t); // delta_t
        double[][] h_hat_t = h_hat_s.get(t); // \hat{h}^t  [t]
        double[][] mu_pre_t = mu_s.get(t - 1); // mu^{t-1} (N*1)
        double V_pre_t = v_s.get(t - 1); // V^{t-1}

        Matrix a = new Matrix(AS.get(t - 1)); // A^{t-1}
        Matrix hprime_pre_t = new Matrix(h_prime_s.get(t - 1)); // h'^{t-1}
        Matrix ave_neighbors = a.times(hprime_pre_t);

        /* calculate \mu */
        double[][] mu_t = new double[n][K];
        double factor_1 =
            (delta_t * delta_t)
                / (delta_t * delta_t + sigma * sigma + (1 - lambda) * (1 - lambda) * V_pre_t);
        double factor_2 =
            (sigma * sigma + (1 - lambda) * (1 - lambda) * V_pre_t)
                / (delta_t * delta_t + sigma * sigma + (1 - lambda) * (1 - lambda) * V_pre_t);
        for (int i = 0; i < n; i++)
          for (int k = 0; k < K; k++) {
            mu_t[i][k] =
                factor_1 * ((1 - lambda) * mu_pre_t[i][k] + lambda * ave_neighbors.get(i, k))
                    + factor_2 * h_hat_t[i][k];
          }
        /* calculate V */
        double V_t = factor_2 * delta_t * delta_t;

        /* update \mu and V */
        mu_s.set(t, mu_t);
        v_s.set(t, V_t);

        /* calculate and update grad_mu */
        if (update_grad)
          for (int s = 0; s < T - t0; s++) {
            double[][] grad_pre_t_s = grad_mu_s.get((t - 1) * (T - t0) + s);
            double[][] grad_t_s = new double[n][K];
            for (int i = 0; i < n; i++)
              for (int k = 0; k < K; k++) {
                grad_t_s[i][k] = factor_1 * (1 - lambda) * grad_pre_t_s[i][k];
                if (t == s) {
                  grad_t_s[i][k] += factor_2;
                }
              }
            grad_mu_s.set(t * (T - t0) + s, grad_t_s);
          }
      } else {
        /* mu, V: random init (keep unchanged) */
        /* grad_mu: set to 0 (keep unchanged) */
      }
      //      Scanner sc = new Scanner(System.in);
      //      int gu; gu = sc.nextInt();
      /* end for each t */
    }
  }
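Similarly, the forward recursion above reads as follows, transcribed from factor_1, factor_2, mu_t, V_t, and grad_t_s:

$$f_1 = \frac{\delta_t^2}{\delta_t^2 + \sigma^2 + (1-\lambda)^2 V^{t-1}}, \qquad f_2 = \frac{\sigma^2 + (1-\lambda)^2 V^{t-1}}{\delta_t^2 + \sigma^2 + (1-\lambda)^2 V^{t-1}}$$

$$\mu^{t}_{ik} = f_1\bigl((1-\lambda)\,\mu^{t-1}_{ik} + \lambda\,(A^{t-1}h'^{t-1})_{ik}\bigr) + f_2\,\hat{h}^{t}_{ik}, \qquad V^{t} = f_2\,\delta_t^2$$

with gradient $\nabla\mu^{\,t,s}_{ik} = f_1(1-\lambda)\,\nabla\mu^{\,t-1,s}_{ik} + f_2\,[t=s]$.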
Example #8
File: Main.java (Project: YpGu/gcoev)
  public static void compute_gradient2(int iteration) {
    double[][][] tmp_grad_h_hat_prime_s = new double[T - t0][n][K];

    /*
     * compute
     *   nti[t][i]      = \sum_{j} { n_{ij}^{t} }
     * and
     *   nti_h[t][j][k] = \sum_{i} { n_{ij}^{t} h_{ik}^{t} }
     */
    double[][] nti = new double[T - t0][n];
    double[][][] nti_h = new double[T - t0][n][K];
    for (int t = 0; t < T - t0; t++) {
      double[][] G_t = GS.get(t);
      double[][] h_t = h_s.get(t); // h^{t}
      for (int i = 0; i < n; i++)
        for (int j = 0; j < n; j++) {
          nti[t][i] += G_t[i][j];
          for (int k = 0; k < K; k++) {
            nti_h[t][j][k] += G_t[i][j] * h_t[i][k];
          }
        }
    }

    for (int t = 0; t < T - t0; t++) {
      double delta_t = delta_prime_s.get(t);
      double[][] h_t = h_s.get(t); // h^{t}
      double[][] h_hat_prime_t = h_hat_prime_s.get(t); // \hat{h}^{t}
      double[][] mu_hat_t = mu_hat_s.get(t); // \hat{\mu}^{t}
      double[][] mu_hat_prime_t = mu_hat_prime_s.get(t); // \hat{\mu}'^{t}
      double[][] h_prime_t = h_prime_s.get(t);

      if (t != 0) {
        Matrix a = new Matrix(AS.get(t - 1));
        Matrix hprime_pre_t = new Matrix(h_prime_s.get(t - 1));
        Matrix ave_neighbors = a.times(hprime_pre_t);

        double[][] G_pre_t = GS.get(t - 1); // G^{t-1}
        double[][] A_pre_t = AS.get(t - 1); // A^{t-1}
        double[][] h_pre_t = h_s.get(t - 1); // h^{t-1}
        double[][] mu_hat_prime_pre_t = mu_hat_prime_s.get(t - 1); // \hat{\mu}'^{t-1}  [t]

        for (int s = 0; s < T - t0; s++) {
          double[][] grad_mu_hat_prime_t = grad_mu_hat_prime_s.get(t * (T - t0) + s);
          double[][] grad_mu_hat_prime_pre_t = grad_mu_hat_prime_s.get((t - 1) * (T - t0) + s);
          double[] h2delta2 = new double[n];
          for (int i = 0; i < n; i++)
            for (int k = 0; k < K; k++) {
              h2delta2[i] += 0.5 * h_t[i][k] * h_t[i][k] * delta_t * delta_t;
            }

          /* compute weighted_exp for later use */
          double[][][] weighted_exp_num = new double[K][n][n];
          double[][] weighted_exp_den = new double[K][n];
          double[][][] weighted_exp = new double[K][n][n];
          for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++) {
              double h_muhp = Operations.inner_product(h_t[j], mu_hat_prime_t[i], K);
              for (int k = 0; k < K; k++) {
                weighted_exp_num[k][i][j] = h_t[j][k] * Math.exp(h_muhp + h2delta2[j]);
                weighted_exp_den[k][j] += Math.exp(h_muhp + h2delta2[j]);
              }
            }
          for (int i = 0; i < n; i++)
            for (int j = 0; j < n; j++)
              for (int k = 0; k < K; k++) {
                weighted_exp[k][i][j] = weighted_exp_num[k][i][j] / weighted_exp_den[k][j];
              }
          /* compute sum_mu_hat_prime for later use */
          double[] sum_mu_hat_prime = new double[K];
          for (int i = 0; i < n; i++)
            for (int k = 0; k < K; k++) {
              sum_mu_hat_prime[k] += mu_hat_prime_pre_t[i][k];
            }

          for (int i = 0; i < n; i++)
            for (int k = 0; k < K; k++) {
              /* first term */
              double g1 = nti_h[t][i][k] * grad_mu_hat_prime_t[i][k];
              tmp_grad_h_hat_prime_s[s][i][k] += g1;

              /* second term */
              double g2 = 0;
              for (int j = 0; j < n; j++) {
                g2 -= nti[t][j] * weighted_exp[k][i][j] * grad_mu_hat_prime_t[i][k];
              }
              tmp_grad_h_hat_prime_s[s][i][k] += g2;

              /* third term */
              for (int j = 0; j < n; j++)
                if (G_pre_t[j][i] != 0) {
                  //	      double g3 = ( h_t[j][k] - (1-lambda) * h_pre_t[j][k] - lambda *
                  // A_pre_t[j][i] * sum_mu_hat_prime[k] )
                  double g3 =
                      (h_t[j][k]
                              - (1 - lambda) * h_pre_t[j][k]
                              - lambda * A_pre_t[j][i] * mu_hat_prime_pre_t[i][k])
                          * lambda
                          * A_pre_t[j][i]
                          * grad_mu_hat_prime_pre_t[i][k]
                          / (sigma * sigma);
                  tmp_grad_h_hat_prime_s[s][j][k] += g3; // j instead of i!
                }
            }

          /* fourth term */
          for (int i = 0; i < n; i++)
            for (int k = 0; k < K; k++) {
              double g4 =
                  -(mu_hat_prime_t[i][k] - mu_hat_prime_pre_t[i][k])
                      * (grad_mu_hat_prime_t[i][k] - grad_mu_hat_prime_pre_t[i][k])
                      / (sigma * sigma);
              tmp_grad_h_hat_prime_s[s][i][k] += g4;
            }
        }
      } else {
        /*
        for (int s = 0; s < T-t0; s++) {
          double[] grad_mu_hat_prime_t = grad_mu_hat_prime_s.get(t * (T-t0) + s);
          for (int i = 0; i < n; i++) {
            // first term
            double g1 = nti_hp[t][i] * grad_mu_hat_prime_t[i];
            tmp_grad_h_hat_prime_s[s][i] += g1;

            // second term
            double g2 = 0;
            for (int _j = 0; _j < NEG; _j++) {
              double weighted_exp_num = 0, weighted_exp_den = 0;
              int j = neg_samples.get(t)[i][_j];
              double htj = h_t[j][0]; double muhti = mu_hat_t[i];
              weighted_exp_num += htj * Math.exp(htj * muhti + 0.5 * htj * htj * delta_t * delta_t);
              for (int _k = 0; _k < NEG; _k++) {
        	int k = neg_samples.get(t)[i][_k];
        	double muhtk = mu_hat_t[k];
        	weighted_exp_den += Math.exp(htj * muhtk + 0.5 * htj * htj * delta_t * delta_t);
              }
              g2 -= nti[t][j] * weighted_exp_num / weighted_exp_den * grad_mu_hat_prime_t[i];
            }
            tmp_grad_h_hat_prime_s[s][i] += g2;
          }

          // fourth term (if any)
          if (s == t) for (int i = 0; i < n; i++) {
            double g4 = -h_hat_prime_t[i][0] / (sigma*sigma);
            tmp_grad_h_hat_prime_s[s][i] += g4;
          }
        }
        */
      }
    }

    /* update global gradient */
    for (int t = 0; t < T - t0; t++) {
      double[][] grad = new double[n][K];
      for (int i = 0; i < n; i++)
        for (int k = 0; k < K; k++) {
          grad[i][k] = tmp_grad_h_hat_prime_s[t][i][k];
        }
      grad_h_hat_prime_s.set(t, grad);
    }
    FileParser.output_2d(grad_h_hat_prime_s, "./grad/grad_prime_" + iteration + ".txt");

    return;
  }
Example #9
File: Main.java (Project: YpGu/gcoev)
  public static void compute_gradient1(int iteration) {
    double[][][] tmp_grad_h_hat_s = new double[T - t0][n][K];

    for (int t = 0; t < T - t0; t++) {
      //      System.out.println("compute gradient 1, t = " + t);
      double delta_t = delta_s.get(t);
      double[][] G_t = GS.get(t);
      double[][] h_prime_t = h_prime_s.get(t);
      double[][] mu_hat_t = mu_hat_s.get(t);

      if (t != 0) {
        double[][] mu_hat_pre_t = mu_hat_s.get(t - 1);

        Matrix a = new Matrix(AS.get(t - 1));
        Matrix hprime_pre_t = new Matrix(h_prime_s.get(t - 1));
        Matrix ave_neighbors = a.times(hprime_pre_t);

        /* TODO: check whether we can save computation by comparing s and t */
        for (int s = 0; s < T - t0; s++) {
          double[][] grad_hat_t = grad_mu_hat_s.get(t * (T - t0) + s);
          double[][] grad_hat_pre_t = grad_mu_hat_s.get((t - 1) * (T - t0) + s);
          double[] hp2delta2 = new double[n];
          for (int i = 0; i < n; i++)
            for (int k = 0; k < K; k++) {
              hp2delta2[i] += 0.5 * h_prime_t[i][k] * h_prime_t[i][k] * delta_t * delta_t;
            }

          for (int i = 0; i < n; i++) {
            /* first term */
            double[] weighted_exp_num = new double[K];
            double weighted_exp_den = 0;
            for (int l = 0; l < n; l++) {
              double hp_muh = Operations.inner_product(h_prime_t[l], mu_hat_t[i], K);
              double e = Math.exp(hp_muh + hp2delta2[l]);
              if (Double.isNaN(e)) {
                /* check if e explodes */
                System.out.println("ERROR2");
                Scanner sc = new Scanner(System.in);
                int gu;
                gu = sc.nextInt();
              }
              for (int k = 0; k < K; k++) {
                weighted_exp_num[k] += h_prime_t[l][k] * e;
                weighted_exp_den += e;
              }
            }
            for (int j = 0; j < n; j++)
              for (int k = 0; k < K; k++) {
                double weighted_exp = weighted_exp_num[k] / weighted_exp_den;
                double gi1 = G_t[i][j] * grad_hat_t[i][k] * (h_prime_t[j][k] - weighted_exp);
                tmp_grad_h_hat_s[s][i][k] += gi1;
              }

            /* second term */
            for (int k = 0; k < K; k++) {
              double gi2 =
                  -(mu_hat_t[i][k]
                          - (1 - lambda) * mu_hat_pre_t[i][k]
                          - lambda * ave_neighbors.get(i, k))
                      * (grad_hat_t[i][k] - (1 - lambda) * grad_hat_pre_t[i][k])
                      / (sigma * sigma);
              tmp_grad_h_hat_s[s][i][k] += gi2;
            }
          }
        }
      } else {
        /* no such term (t=0) in ELBO */
        /*
        for (int s = 0; s < T-t0; s++) {
          double[] grad_hat_t = grad_mu_hat_s.get(t * (T-t0) + s);

          for (int i = 0; i < n; i++) {
            double n_it = 0;
            for (int j = 0; j < n; j++) n_it += G_t[i][j];

            // first term
            double gi1 = -mu_hat_t[i] * grad_hat_t[i] / (sigma * sigma);
            tmp_grad_h_hat_s[s][i] += gi1;

            // second term
            double gi2 = 0;
            double weighted_exp_num = 0, weighted_exp_den = 0;
            for (int j = 0; j < NEG; j++) {
              int l = neg_samples.get(t)[i][j];
              double hpl = h_prime_t[l][0];
              double muit = mu_hat_t[i];
              double e = Math.exp(hpl * muit + 0.5 * hpl * hpl * delta_t * delta_t);
              // TODO: check if e explodes
              if (Double.isNaN(e)) {
        	System.out.println("ERROR3");
        	Scanner sc = new Scanner(System.in);
        	int gu; gu = sc.nextInt();
              }
              weighted_exp_num += hpl * e;
              weighted_exp_den += e;
            }
            double weighted_exp = weighted_exp_num / weighted_exp_den;
            for (int j = 0; j < n; j++) {
              gi2 += G_t[i][j] * grad_hat_t[i] * (h_prime_t[j][0] - weighted_exp);
            }
            tmp_grad_h_hat_s[s][i] += gi2;
          }
        }
        */
      }
      /* end if-else */
    }

    /* update global gradient */
    for (int t = 0; t < T - t0; t++) {
      double[][] grad = new double[n][K];
      for (int i = 0; i < n; i++)
        for (int k = 0; k < K; k++) {
          grad[i][k] = tmp_grad_h_hat_s[t][i][k];
        }
      grad_h_hat_s.set(t, grad);
    }
    FileParser.output_2d(grad_h_hat_s, "./grad/grad_" + iteration + ".txt");

    return;
  }
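Written out, the second term gi2 above is the following (a transcription of the code, with $\widehat{\nabla\mu}$ standing for the entries of grad_mu_hat_s); it is consistent with differentiating the quadratic transition penalty that appears again in compute_objective1 below:

$$g^{(2)}_{ik} = -\frac{1}{\sigma^2}\,\bigl(\hat{\mu}^{t}_{ik} - (1-\lambda)\hat{\mu}^{t-1}_{ik} - \lambda\,(A^{t-1}h'^{t-1})_{ik}\bigr)\,\bigl(\widehat{\nabla\mu}^{\,t,s}_{ik} - (1-\lambda)\widehat{\nabla\mu}^{\,t-1,s}_{ik}\bigr)$$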
Example #10
File: Main.java (Project: YpGu/gcoev)
  /** compute_objective1: return the lower bound when h' is fixed */
  public static double compute_objective1() {
    double res = 0;
    for (int t = 0; t < T - t0; t++) {
      if (t != 0) {
        double[][] G_t = GS.get(t);
        double[][] h_prime_t = h_prime_s.get(t);
        double[][] h_prime_pre_t = h_prime_s.get(t - 1);
        double[][] mu_hat_t = mu_hat_s.get(t);
        double[][] mu_hat_pre_t = mu_hat_s.get(t - 1);
        double delta_t = delta_s.get(t);

        Matrix a = new Matrix(AS.get(t - 1));
        Matrix hprime_pre_t = new Matrix(h_prime_s.get(t - 1));
        Matrix ave_neighbors = a.times(hprime_pre_t);

        double[] hp2delta2 = new double[n];
        for (int i = 0; i < n; i++)
          for (int k = 0; k < K; k++) {
            hp2delta2[i] += 0.5 * h_prime_t[i][k] * h_prime_t[i][k] * delta_t * delta_t;
          }

        for (int i = 0; i < n; i++) {
          /* first term */
          List<Double> powers = new ArrayList<Double>();
          for (int l = 0; l < n; l++) {
            double hp_muh = Operations.inner_product(h_prime_t[l], mu_hat_t[i], K);
            powers.add(hp_muh + hp2delta2[l]);
          }
          double lse = log_sum_exp(powers);

          for (int j = 0; j < n; j++)
            if (G_t[i][j] != 0) {
              double hp_muh = Operations.inner_product(h_prime_t[j], mu_hat_t[i], K);
              res += G_t[i][j] * (hp_muh - lse);
            }

          /* second term */
          for (int k = 0; k < K; k++) {
            double diff =
                mu_hat_t[i][k]
                    - (1 - lambda) * mu_hat_pre_t[i][k]
                    - lambda * ave_neighbors.get(i, k);
            res -= 0.5 * diff * diff / (sigma * sigma);
          }
        }
      } else {
        /*
        double[][] G_t = GS.get(t);
        double[][] h_prime_t = h_prime_s.get(t);
        double[] mu_hat_t = mu_hat_s.get(t);
        double delta_t = delta_s.get(t);
        int[][] neg_sam_t = neg_samples.get(t);

        for (int i = 0; i < n; i++) {
          // first term
          for (int j = 0; j < n; j++) if (G_t[i][j] != 0) {
            List<Double> powers = new ArrayList<Double>();
            for (int _l = 0; _l < NEG; _l++) {
              int l = neg_sam_t[i][_l];
              powers.add(h_prime_t[l][0] * mu_hat_t[i]
        	  + 0.5 * h_prime_t[l][0] * h_prime_t[l][0] * delta_t * delta_t);
            }
            double lse = log_sum_exp(powers);
            res += G_t[i][j] * (h_prime_t[j][0] * mu_hat_t[i] - lse);
          }
        }
        */
      }
    }
    return res;
  }
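log_sum_exp is called in the first term above but is not included in this excerpt. A minimal numerically stable sketch with the signature the call site assumes (a List<Double> of exponents; assumes java.util.List is imported), which may differ from the project's own implementation:

  // Sketch: log(sum_i exp(powers_i)), shifting by the max to avoid overflow.
  public static double log_sum_exp(List<Double> powers) {
    double max = Double.NEGATIVE_INFINITY;
    for (double p : powers) max = Math.max(max, p);
    double sum = 0.0;
    for (double p : powers) sum += Math.exp(p - max);
    return max + Math.log(sum);
  }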