Code Example #1
    public Double eval(Double gamma) {
      double c = 1.0 / Math.sqrt(variance);
      double sum = 0.0;
      for (Integer i : transcripts.get(gammat)) {
        double pi = 0.0;

        for (int t = 0; t < fiveprime.length; t++) {
          if (transcripts.get(t).contains(i)) {
            double gammai = gammat == t ? gamma : gammas[t][gammak];
            double dit = delta(i, t);

            double pit = gammai * Math.exp(-lambda * dit);
            pi += pit;
          }
        }

        double zi = Math.log(pi);
        double err = data.values(i)[channels[gammak]] - zi;
        double ratio = (Math.exp(-lambda * delta(i, gammat))) / pi;
        double termi = (err * ratio);

        sum += termi;
      }

      return c * sum;
    }
Code Example #2
  private void getnegphase() {
    /*
     * Performs the negative phase of the unsupervised RBM training algorithm.
     *
     * For details, see G. E. Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, no. 5786, pp. 504 - 507, 28 July 2006.
     */

    // Start calculating the negative phase:
    // compute the activations of v1 and h1
    // first, reconstruct the visible vector v1
    Matrix negdata = poshidstates.times(vishid.transpose());
    // (1 * numhid) * (numhid * numdims) = (1 * numdims)
    negdata.plusEquals(visbiases);
    // poshidstates*vishid' + visbiases
    double[][] tmp1 = negdata.getArray();
    // apply the logistic (sigmoid) function element-wise
    for (int i1 = 0; i1 < numdims; i1++) {
      tmp1[0][i1] = 1 / (1 + Math.exp(-tmp1[0][i1]));
    }

    // find the vector of h1
    neghidprobs = negdata.times(vishid);
    // (1 * numdims) * (numdims * numhid) = (1 * numhid)
    neghidprobs.plusEquals(hidbiases);
    double[][] tmp2 = neghidprobs.getArray();
    // apply the logistic (sigmoid) function element-wise
    for (int i2 = 0; i2 < numhid; i2++) {
      tmp2[0][i2] = 1 / (1 + Math.exp(-tmp2[0][i2]));
    }
    negprods = negdata.transpose().times(neghidprobs);
    // (numdims * 1) *(1 * numhid) = (numdims * numhid)
  }
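The element-wise loops above apply the logistic (sigmoid) function, a pattern that recurs throughout the examples on this page. A minimal self-contained sketch (the class and method names here are illustrative, not from the project above):

public final class SigmoidDemo {
  // logistic function: maps any real x into the open interval (0, 1)
  static double sigmoid(double x) {
    return 1.0 / (1.0 + Math.exp(-x));
  }

  public static void main(String[] args) {
    System.out.println(sigmoid(0.0));  // 0.5
    System.out.println(sigmoid(5.0));  // ~0.9933
    System.out.println(sigmoid(-5.0)); // ~0.0067
  }
}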
Code Example #3
File: NeuralNetwork.java Project: geomois/ciex2
 private ArrayList<Double> forwardProp(Double[] input) {
   for (int i = 0; i < hiddenLNo; i++) {
     for (int j = 0; j < inputSize + 1; j++) {
       if (j == inputSize) {
         hiddenNodes.set(i, hiddenNodes.get(i) + w1[j][i] * bias);
       } else {
         hiddenNodes.set(i, hiddenNodes.get(i) + w1[j][i] * input[j]);
       }
     }
     double temp = (1.0 / (1.0 + Math.exp(-hiddenNodes.get(i))));
     hiddenNodes.set(i, temp);
   }
   for (int i = 0; i < outputLNo; i++) {
     for (int j = 0; j < hiddenLNo + 1; j++) {
       if (j == hiddenLNo) { // the extra last index carries the bias weight
         outputNodes.set(i, outputNodes.get(i) + w2[j][i] * bias);
       } else {
         outputNodes.set(i, outputNodes.get(i) + w2[j][i] * hiddenNodes.get(j));
       }
     }
     outputNodes.set(i, (1.0 / (1.0 + Math.exp(-outputNodes.get(i)))));
   }
   return outputNodes;
 }
Code Example #4
    public Double eval(Double gamma) {
      double c1 = 0.0;
      double c3 = -1.0 / Math.sqrt(2.0 * variance);
      double sum = 0.0;

      double cpart = -0.5 * Math.log(Math.sqrt(2.0 * Math.PI * variance));

      for (Integer i : transcripts.get(gammat)) {
        c1 += cpart;
        double pi = 0.0;

        for (int t = 0; t < fiveprime.length; t++) {
          if (transcripts.get(t).contains(i)) {
            double gammai = gammat == t ? gamma : gammas[t][gammak];
            double dit = delta(i, t);

            double pit = gammai * Math.exp(-lambda * dit);
            pi += pit;
          }
        }

        double zi = Math.log(pi);
        double err = data.values(i)[channels[gammak]] - zi;

        sum += (err * err);
      }

      return c1 + c3 * sum;
    }
Code Example #5
  public double error() {
    double sum = 0.0;

    for (int i = 0; i < data.size(); i++) {
      for (int k = 0; k < channels.length; k++) {
        double pi = 0.0;

        for (Integer t : transcripts.keySet()) {
          if (transcripts.get(t).contains(i)) {
            double gammai = gammas[t][k];
            double dit = delta(i, t);

            double pit = gammai * Math.exp(-lambda * dit);
            pi += pit;
          }
        }

        double zi = Math.log(pi);
        double err = Math.abs(data.values(i)[channels[k]] - zi);

        sum += err;
      }
    }

    return sum;
  }
Code Example #6
    public Double eval(Double lmbda) {
      double c = 1.0 / Math.sqrt(variance);
      double sum = 0.0;

      for (int i = 0; i < data.size(); i++) {
        for (int k = 0; k < channels.length; k++) {
          double pi = 0.0;
          double pdi = 0.0;

          for (Integer t : transcripts.keySet()) {
            if (transcripts.get(t).contains(i)) {
              double gammai = gammas[t][k];
              double dit = delta(i, t);

              double falloff = Math.exp(-lmbda * dit);
              double pit = gammai * falloff;
              double pdit = pit * dit;

              pi += pit;
              pdi += pdit;
            }
          }

          double zi = Math.log(pi);
          double err = data.values(i)[channels[k]] - zi;
          double ratio = pdi / pi;
          double termi = (err * ratio);

          sum += termi;
        }
      }

      return c * sum;
    }
Code Example #7
File: QuantileDigest.java Project: cyenjung/pinot
  private void rescale(long newLandmarkInSeconds) {
    // rescale the weights based on a new landmark to avoid numerical overflow issues

    final double factor = Math.exp(-alpha * (newLandmarkInSeconds - landmarkInSeconds));

    weightedCount *= factor;

    postOrderTraversal(
        root,
        new Callback() {
          @Override
          public boolean process(Node node) {
            double oldWeight = node.weightedCount;

            node.weightedCount *= factor;

            if (oldWeight >= ZERO_WEIGHT_THRESHOLD && node.weightedCount < ZERO_WEIGHT_THRESHOLD) {
              --nonZeroNodeCount;
            }

            return true;
          }
        });

    landmarkInSeconds = newLandmarkInSeconds;
  }
Code Example #8
File: AddExpression.java Project: bigbigbug/wekax
 /**
  * Apply this operator (function) to the supplied argument
  *
  * @param value the argument
  * @return the result
  */
 protected double applyFunction(double value) {
   switch (m_operator) {
     case 'l':
       return Math.log(value);
     case 'b':
       return Math.abs(value);
     case 'c':
       return Math.cos(value);
     case 'e':
       return Math.exp(value);
     case 's':
       return Math.sqrt(value);
     case 'f':
       return Math.floor(value);
     case 'h':
       return Math.ceil(value);
     case 'r':
       return Math.rint(value);
     case 't':
       return Math.tan(value);
     case 'n':
       return Math.sin(value);
   }
   return Double.NaN;
 }
Code Example #9
    public Double eval(Double lmbda) {
      double c1 = 0.0;
      double c3 = -1.0 / Math.sqrt(2.0 * variance);
      double sum = 0.0;

      double cpart = -0.5 * Math.log(Math.sqrt(2.0 * Math.PI * variance));

      for (int i = 0; i < data.size(); i++) {
        for (int k = 0; k < channels.length; k++) {
          c1 += cpart;
          double pi = 0.0;

          for (Integer t : transcripts.keySet()) {
            if (transcripts.get(t).contains(i)) {
              double dit = delta(i, t);
              double gammai = gammas[t][k];
              double pit = gammai * Math.exp(-lmbda * dit);
              pi += pit;
            }
          }

          double zi = Math.log(pi);
          double err = data.values(i)[channels[k]] - zi;

          sum += (err * err);
        }
      }

      return c1 + c3 * sum;
    }
Code Example #10
File: StatFunctions.java Project: Balkanlii/nlp
  public static double qt(double p, double ndf, boolean lower_tail) {
    // Algorithm 396: Student's t-quantiles by
    // G.W. Hill CACM 13(10), 619-620, October 1970
    if (p <= 0 || p >= 1 || ndf < 1)
      throw new IllegalArgumentException("Invalid p or df in call to qt(double,double,boolean).");
    double eps = 1e-12;
    double M_PI_2 = 1.570796326794896619231321691640; // pi/2
    boolean neg;
    double P, q, prob, a, b, c, d, y, x;
    if ((lower_tail && p > 0.5) || (!lower_tail && p < 0.5)) {
      neg = false;
      P = 2 * (lower_tail ? (1 - p) : p);
    } else {
      neg = true;
      P = 2 * (lower_tail ? p : (1 - p));
    }

    if (Math.abs(ndf - 2) < eps) {
        /* df ~= 2 */
      q = Math.sqrt(2 / (P * (2 - P)) - 2);
    } else if (ndf < 1 + eps) {
        /* df ~= 1 */
      prob = P * M_PI_2;
      q = Math.cos(prob) / Math.sin(prob);
    } else {
        /*-- usual case;  including, e.g.,  df = 1.1 */
      a = 1 / (ndf - 0.5);
      b = 48 / (a * a);
      c = ((20700 * a / b - 98) * a - 16) * a + 96.36;
      d = ((94.5 / (b + c) - 3) / b + 1) * Math.sqrt(a * M_PI_2) * ndf;
      y = Math.pow(d * P, 2 / ndf);
      if (y > 0.05 + a) {
        /* Asymptotic inverse expansion about normal */
        x = qnorm(0.5 * P, false);
        y = x * x;
        if (ndf < 5) c += 0.3 * (ndf - 4.5) * (x + 0.6);
        c = (((0.05 * d * x - 5) * x - 7) * x - 2) * x + b + c;
        y = (((((0.4 * y + 6.3) * y + 36) * y + 94.5) / c - y - 3) / b + 1) * x;
        y = a * y * y;
        if (y > 0.002) /* FIXME: This cutoff is machine-precision dependent*/ y = Math.exp(y) - 1;
        else {
            /* Taylor of    e^y -1 : */
          y = (0.5 * y + 1) * y;
        }
      } else {
        y = ((1 / (((ndf + 6) / (ndf * y) - 0.089 * d - 0.822) * (ndf + 2) * 3)
                + 0.5 / (ndf + 4)) * y - 1) * (ndf + 1) / (ndf + 2) + 1 / y;
      }
      q = Math.sqrt(ndf * y);
    }
    if (neg) q = -q;
    return q;
  }
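A quick sanity check for qt (a sketch that assumes the StatFunctions class from the file header is on the classpath, including the qnorm helper that qt calls internally): the lower-tail 97.5% quantile of Student's t with 10 degrees of freedom is tabulated as roughly 2.228.

public class QtDemo {
  public static void main(String[] args) {
    // textbook value: t(0.975, df = 10) is about 2.228
    System.out.println(StatFunctions.qt(0.975, 10.0, true));
  }
}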
Code Example #11
File: Utils.java Project: mkolod/pdfxtk
  public static float gaussian(float x, float mean, float sd) {
    float mu = mean;
    float sigma = sd;

    float k1 = (float) (1 / (sigma * Math.sqrt(2 * Math.PI)));
    float k2 = -1 / (2 * (sigma * sigma));

    return (float) (k1 * Math.exp((x - mu) * (x - mu) * k2));
  }
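A minimal check (assuming the Utils class from the file header is on the classpath): at the mean with unit standard deviation the density is 1/sqrt(2*pi), about 0.3989, and one standard deviation out it is exp(-0.5)/sqrt(2*pi), about 0.2420.

public class GaussianDemo {
  public static void main(String[] args) {
    System.out.println(Utils.gaussian(0f, 0f, 1f)); // expect ~0.39894
    System.out.println(Utils.gaussian(1f, 0f, 1f)); // expect ~0.24197
  }
}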
Code Example #12
File: LR.java Project: sjeblee/sp2013.11-731
  public static double g(Double[] xi, int yi) {
    // computes the logistic function of yi * (w . xi); weightvector[0] is the bias w0
    double sum = weightvector[0]; // w0

    for (int k = 1; k < numfeatures + 1; k++) {
      sum += weightvector[k] * xi[k];
    }
    return (1 / (1 + Math.exp(yi * sum))); // logistic: 1 / (1 + e^(yi * sum))
  } // end g(z)
Code Example #13
File: svm.java Project: CPernet/CanlabCore
	static double k_function(svm_node[] x, svm_node[] y,
					svm_parameter param)
	{
		switch(param.kernel_type)
		{
			case svm_parameter.LINEAR:
				return dot(x,y);
			case svm_parameter.POLY:
				return Math.pow(param.gamma*dot(x,y)+param.coef0,param.degree);
			case svm_parameter.RBF:
			{
				double sum = 0;
				int xlen = x.length;
				int ylen = y.length;
				int i = 0;
				int j = 0;
				while(i < xlen && j < ylen)
				{
					if(x[i].index == y[j].index)
					{
						double d = x[i++].value - y[j++].value;
						sum += d*d;
					}
					else if(x[i].index > y[j].index)
					{
						sum += y[j].value * y[j].value;
						++j;
					}
					else
					{
						sum += x[i].value * x[i].value;
						++i;
					}
				}

				while(i < xlen)
				{
					sum += x[i].value * x[i].value;
					++i;
				}

				while(j < ylen)
				{
					sum += y[j].value * y[j].value;
					++j;
				}

				return Math.exp(-param.gamma*sum);
			}
			case svm_parameter.SIGMOID:
				return tanh(param.gamma*dot(x,y)+param.coef0);
			default:
				System.err.print("unknown kernel function.\n");
				System.exit(1);
				return 0;	// java
		}
	}
Code Example #14
File: StatFunctions.java Project: Balkanlii/nlp
  public static double pnorm(double z, boolean upper) {
    /* Reference:
       I. D. Hill
       Algorithm AS 66: "The Normal Integral"
       Applied Statistics
    */
    double ltone = 7.0,
        utzero = 18.66,
        con = 1.28,
        a1 = 0.398942280444,
        a2 = 0.399903438504,
        a3 = 5.75885480458,
        a4 = 29.8213557808,
        a5 = 2.62433121679,
        a6 = 48.6959930692,
        a7 = 5.92885724438,
        b1 = 0.398942280385,
        b2 = 3.8052e-8,
        b3 = 1.00000615302,
        b4 = 3.98064794e-4,
        b5 = 1.986153813664,
        b6 = 0.151679116635,
        b7 = 5.29330324926,
        b8 = 4.8385912808,
        b9 = 15.1508972451,
        b10 = 0.742380924027,
        b11 = 30.789933034,
        b12 = 3.99019417011;
    double y, alnorm;

    if (z < 0) {
      upper = !upper;
      z = -z;
    }
    if (z <= ltone || (upper && z <= utzero)) {
      y = 0.5 * z * z;
      if (z > con) {
        alnorm =
            b1
                * Math.exp(-y)
                / (z
                    - b2
                    + b3
                        / (z
                            + b4
                            + b5 / (z - b6 + b7 / (z + b8 - b9 / (z + b10 + b11 / (z + b12))))));
      } else {
        alnorm = 0.5 - z * (a1 - a2 * y / (y + a3 - a4 / (y + a5 + a6 / (y + a7))));
      }
    } else {
      alnorm = 0;
    }
    if (!upper) alnorm = 1 - alnorm;
    return (alnorm);
  }
Code Example #15
File: Enrichment.java Project: gifford-lab/GEM
 public String reportLine() {
   return String.format(
       "%s, pval= %.2e, %d / %d = %.3f (freq in GO %d / %d = %.3f)",
       getCategory(),
       Math.exp(getLogPValue()),
       getx(),
       getn(),
        (double) getx() / getn(),
       getTheta(),
       getN(),
        (double) getTheta() / getN());
 }
Code Example #16
	  private double computeDistortionProb(int englishPosition, int frenchPosition, 
			  int numEnglishWords, int numFrenchWords) {
		  double alpha = 1.0;
		  double kNull = .2; // proportion of probability mass to allot for null alignments
		  if (englishPosition == 0) { // How to compute distortion probability for null alignments?
			  return kNull;
		  }
		  else {
			  double dist = englishPosition - (frenchPosition * numEnglishWords / numFrenchWords);
			  double metric = (1.0 - kNull) * Math.exp(-1.0 * alpha * dist);
			  return metric;
		  }		  
	  }
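Worked numbers for computeDistortionProb: with alpha = 1.0 and kNull = 0.2, an alignment at exactly the expected position (dist = 0) scores (1 - 0.2) * e^0 = 0.8, and one position off scores 0.8 * e^(-1), about 0.294. Note that a negative dist makes e^(-alpha * dist) exceed 1, so if a symmetric penalty is intended, Math.abs(dist) would be the usual choice; a sketch of that hypothetical variant (not from the original code):

public class DistortionDemo {
  // Hypothetical symmetric variant using |dist|; names are illustrative.
  static double symmetricDistortionProb(int ePos, int fPos, int eLen, int fLen) {
    final double alpha = 1.0;
    final double kNull = 0.2;    // probability mass reserved for null alignments
    if (ePos == 0) return kNull; // null alignment
    double dist = Math.abs(ePos - (double) fPos * eLen / fLen);
    return (1.0 - kNull) * Math.exp(-alpha * dist);
  }

  public static void main(String[] args) {
    System.out.println(symmetricDistortionProb(5, 5, 10, 10)); // dist = 0 -> 0.8
    System.out.println(symmetricDistortionProb(6, 5, 10, 10)); // dist = 1 -> ~0.294
  }
}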
Code Example #17
File: DComplex.java Project: Narnach/ioke
 public static DComplex power(double x_re, double x_im, double y_re, double y_im) {
   double h;
   /* #ifdef JAVA5 */
   // h = Math.hypot(x_re, x_im);
   /* #else */
   h = DComplex.hypot(x_re, x_im);
   /* #endif */
   double logr = Math.log(h);
   double t = Math.atan2(x_im, x_re);
   double r = Math.exp(logr * y_re - y_im * t);
   t = y_im * logr + y_re * t;
   return Complex.polar(r, t);
 }
Code Example #18
File: StatFunctions.java Project: Balkanlii/nlp
 public double pchisq(double q, double df) {
   // Posten, H. (1989) American Statistician 43 p. 261-265
   double df2 = df * .5;
   double q2 = q * .5;
   int n = 5, k;
   double tk, CFL, CFU, prob;
   if (q <= 0 || df <= 0)
     throw new IllegalArgumentException("Illegal argument " + q + " or " + df + " for pchisq(q, df).");
   if (q < df) {
     tk = q2 * (1 - n - df2) / (df2 + 2 * n - 1 + n * q2 / (df2 + 2 * n));
     for (k = n - 1; k > 1; k--)
       tk = q2 * (1 - k - df2) / (df2 + 2 * k - 1 + k * q2 / (df2 + 2 * k + tk));
     CFL = 1 - q2 / (df2 + 1 + q2 / (df2 + 2 + tk));
     prob = Math.exp(df2 * Math.log(q2) - q2 - Maths.logGamma(df2 + 1) - Math.log(CFL));
   } else {
     tk = (n - df2) / (q2 + n);
     for (k = n - 1; k > 1; k--) tk = (k - df2) / (q2 + k / (1 + tk));
     CFU = 1 + (1 - df2) / (q2 + 1 / (1 + tk));
     prob = 1 - Math.exp((df2 - 1) * Math.log(q2) - q2 - Maths.logGamma(df2) - Math.log(CFU));
   }
   return prob;
 }
Code Example #19
File: MaxEntModel.java Project: zhoufeng/HanLP
  /**
   * Prediction.
   *
   * @param context the context (indices of the active features)
   * @param prior the prior probabilities
   * @param model the feature-function parameters
   * @return the normalized probability of each outcome
   */
  public static double[] eval(int[] context, double[] prior, EvalParameters model) {
    Context[] params = model.getParams();
    int[] numfeats = new int[model.getNumOutcomes()];
    int[] activeOutcomes;
    double[] activeParameters;
    double value = 1;
    for (int ci = 0; ci < context.length; ci++) {
      if (context[ci] >= 0) {
        Context predParams = params[context[ci]];
        activeOutcomes = predParams.getOutcomes();
        activeParameters = predParams.getParameters();
        for (int ai = 0; ai < activeOutcomes.length; ai++) {
          int oid = activeOutcomes[ai];
          numfeats[oid]++;
          prior[oid] += activeParameters[ai] * value;
        }
      }
    }

    double normal = 0.0;
    for (int oid = 0; oid < model.getNumOutcomes(); oid++) {
      if (model.getCorrectionParam() != 0) {
        prior[oid] =
            Math.exp(
                prior[oid] * model.getConstantInverse()
                    + ((1.0 - ((double) numfeats[oid] / model.getCorrectionConstant()))
                        * model.getCorrectionParam()));
      } else {
        prior[oid] = Math.exp(prior[oid] * model.getConstantInverse());
      }
      normal += prior[oid];
    }

    for (int oid = 0; oid < model.getNumOutcomes(); oid++) {
      prior[oid] /= normal;
    }
    return prior;
  }
Code Example #20
File: StatFunctions.java Project: Balkanlii/nlp
 public static double betainv(double x, double p, double q) {
   // ALGORITHM AS 63 APPL. STATIST. VOL.32, NO.1
   // Computes P(Beta>x)
   double beta = Maths.logBeta(p, q), acu = 1E-14;
   double cx, psq, pp, qq, x2, term, ai, betain, ns, rx, temp;
   boolean indx;
   if (p <= 0 || q <= 0) return (-1.0);
   if (x <= 0 || x >= 1) return (-1.0);
   psq = p + q;
   cx = 1 - x;
   if (p < psq * x) {
     x2 = cx;
     cx = x;
     pp = q;
     qq = p;
     indx = true;
   } else {
     x2 = x;
     pp = p;
     qq = q;
     indx = false;
   }
   term = 1;
   ai = 1;
   betain = 1;
   ns = qq + cx * psq;
   rx = x2 / cx;
   temp = qq - ai;
   if (ns == 0) rx = x2;
   while (temp > acu && temp > acu * betain) {
     term = term * temp * rx / (pp + ai);
     betain = betain + term;
     temp = Math.abs(term);
     if (temp > acu && temp > acu * betain) {
       ai++;
       ns--;
       if (ns >= 0) {
         temp = qq - ai;
         if (ns == 0) rx = x2;
       } else {
         temp = psq;
         psq += 1;
       }
     }
   }
   betain *= Math.exp(pp * Math.log(x2) + (qq - 1) * Math.log(cx) - beta) / pp;
   if (indx) betain = 1 - betain;
   return (betain);
 }
Code Example #21
File: svm.java Project: CPernet/CanlabCore
	double kernel_function(int i, int j)
	{
		switch(kernel_type)
		{
			case svm_parameter.LINEAR:
				return dot(x[i],x[j]);
			case svm_parameter.POLY:
				return Math.pow(gamma*dot(x[i],x[j])+coef0,degree);
			case svm_parameter.RBF:
				return Math.exp(-gamma*(x_square[i]+x_square[j]-2*dot(x[i],x[j])));
			case svm_parameter.SIGMOID:
				return tanh(gamma*dot(x[i],x[j])+coef0);
			default:
				System.err.print("unknown kernel function.\n");
				System.exit(1);
				return 0;	// java
		}
	}
Code Example #22
  private void prop2nextLayer() {
    /*
     * Computes the forward-propagation step to the next layer.
     */
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();

    for (int i2 = 0; i2 < numhid; i2++) {
      /*
       * compute the updated input values and write them to newinput
       */
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
      newinput[i2] = (int) (product_tmp2[0][i2] * 255.0);
    }
  }
Code Example #23
File: Main.java Project: YpGu/gcoev
 /** log_sum_exp: for inputs a1, a2, ... returns log(e^a1 + e^a2 + ...), computed stably */
 public static double log_sum_exp(List<Double> ls) {
   // Shift by the maximum so no exponential can overflow; only the max is
   // needed, so there is no reason to sort (and mutate) the caller's list.
   double v_max = Collections.max(ls);
   if (Double.isNaN(v_max)) {
     System.out.println("ERROR6");
   }
   double sum_exp = 0;
   for (int i = 0; i < ls.size(); i++) {
     sum_exp += Math.exp(ls.get(i) - v_max);
   }
   double res = v_max + Math.log(sum_exp);
   if (Double.isNaN(res)) {
     System.out.println("ERROR4");
   }
   return res;
 }
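A quick check of the identity (assuming the Main class above is on the classpath): for inputs {0, 0} the result is log(e^0 + e^0) = log 2, about 0.6931, and shifting both inputs by 1000 shifts the output by exactly 1000 without overflowing, which is the point of subtracting v_max first.

import java.util.Arrays;

public class LogSumExpDemo {
  public static void main(String[] args) {
    System.out.println(Main.log_sum_exp(Arrays.asList(0.0, 0.0)));       // ~0.6931
    System.out.println(Main.log_sum_exp(Arrays.asList(1000.0, 1000.0))); // ~1000.6931
  }
}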
Code Example #24
 /**
  * Get the top few clauses from this searcher, cutting off at the given minimum probability.
  *
  * @param thresholdProbability The threshold under which to stop returning clauses. This should be
  *     between 0 and 1.
  * @return The resulting {@link edu.stanford.nlp.naturalli.SentenceFragment} objects, representing
  *     the top clauses of the sentence.
  */
 public List<SentenceFragment> topClauses(double thresholdProbability) {
   List<SentenceFragment> results = new ArrayList<>();
   search(
       triple -> {
         assert triple.first <= 0.0;
         double prob = Math.exp(triple.first);
         assert prob <= 1.0;
         assert prob >= 0.0;
         assert !Double.isNaN(prob);
         if (prob >= thresholdProbability) {
           SentenceFragment fragment = triple.third.get();
           fragment.score = prob;
           results.add(fragment);
           return true;
         } else {
           return false;
         }
       });
   return results;
 }
Code Example #25
  private void getposphase() {
    /*
     * Performs the positive phase of the unsupervised RBM training algorithm.
     *
     * For details, see G. E. Hinton's paper:
     * Reducing the dimensionality of data with neural networks. Science, Vol. 313, no. 5786, pp. 504 - 507, 28 July 2006.
     */

    // Start calculating the positive phase:
    // compute the hidden activations h0 from the data
    poshidprobs = data.times(vishid);
    // (1 * numdims) * (numdims * numhid)
    poshidprobs.plusEquals(hidbiases);
    // data*vishid + hidbiases
    double[][] product_tmp2 = poshidprobs.getArray();
    // apply the logistic (sigmoid) function element-wise
    for (int i2 = 0; i2 < numhid; i2++) {
      product_tmp2[0][i2] = 1 / (1 + Math.exp(-product_tmp2[0][i2]));
    }
    posprods = data.transpose().times(poshidprobs);
    // (numdims * 1) * (1 * numhid)

    // end of the positive phase calculation; find the binary representation of h0
    double[][] tmp1 = poshidprobs.getArray();
    double[][] tmp2 = new double[1][numhid];
    Random randomgenerator = new Random();
    for (int i3 = 0; i3 < numhid; i3++) {
      /*
       * sample according to the probability given by poshidprobs
       */
      if (tmp1[0][i3] > randomgenerator.nextDouble()) tmp2[0][i3] = 1;
      else tmp2[0][i3] = 0;
    }

    // poshidstates is a binary sample drawn according to the probabilities in poshidprobs
    poshidstates = new Matrix(tmp2);
  }
Code Example #26
 // returns true if the input spike at time t (in microseconds) causes the neuron to spike
 boolean stimulate(int t) {
   if (!initialized) {
     lastt = t;
     initialized = true;
     return true;
   }
   if (t < lastt) {
     reset();
     return true;
   }
   int dt = t - lastt;
   float delta = dt / filter.tauUs;
   float exp = delta > 20 ? 0 : (float) Math.exp(-delta);
   float newstate = state * exp + filter.weight;
   if (newstate > max) {
     newstate = max;
   }
   boolean spike = random.nextFloat() > state; // spike goes through based on decayed state
   state = newstate;
   lastt = t;
   return spike;
 }
Code Example #27
File: svm.java Project: CPernet/CanlabCore
	private static double tanh(double x)
	{
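		// uses the identity tanh(x) = 1 - 2/(e^(2x) + 1); e*e below equals e^(2x)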
		double e = Math.exp(x);
		return 1.0-2.0/(e*e+1);
	}
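A minimal check of that identity against the JDK's own Math.tanh (the demo class is illustrative):

public class TanhDemo {
  public static void main(String[] args) {
    for (double x : new double[] {-2.0, 0.0, 0.5, 3.0}) {
      double e = Math.exp(x);
      double viaIdentity = 1.0 - 2.0 / (e * e + 1); // same formula as svm.tanh above
      System.out.println(viaIdentity + " vs " + Math.tanh(x));
    }
  }
}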
Code Example #28
File: QuantileDigest.java Project: cyenjung/pinot
 private double weight(long timestamp) {
   return Math.exp(alpha * (timestamp - landmarkInSeconds));
 }
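Together with rescale from Code Example #7, this implements exponentially time-decayed weights: an event at time t gets weight e^(alpha * (t - landmarkInSeconds)), and moving the landmark multiplies every stored weight by the matching factor e^(-alpha * delta), which keeps the exponents small and avoids overflow. A toy illustration of why the two factors cancel (values invented):

public class DecayDemo {
  public static void main(String[] args) {
    double alpha = 0.001;
    long landmark = 0;
    long t = 1000;
    double w = Math.exp(alpha * (t - landmark)); // weight of an event at t = 1000 s
    double rescaled = w * Math.exp(-alpha * (t - landmark)); // after moving the landmark to t
    System.out.println(rescaled); // 1.0: unit weight relative to the new landmark
  }
}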
Code Example #29
  /**
   * Calculate the probability of a dependency as a real probability between 0 and 1 inclusive.
   *
   * @param dependency The dependency for which the probability is to be calculated. The tags in
   *     this dependency are in the reduced TagProjection space.
   * @return The probability of the dependency
   */
  protected double probTB(IntDependency dependency) {
    if (verbose) {
      // System.out.println("tagIndex: " + tagIndex);
      System.err.println("Generating " + dependency);
    }

    boolean leftHeaded = dependency.leftHeaded && directional;

    int hW = dependency.head.word;
    int aW = dependency.arg.word;
    short hT = dependency.head.tag;
    short aT = dependency.arg.tag;

    IntTaggedWord aTW = dependency.arg;
    IntTaggedWord hTW = dependency.head;

    boolean isRoot = rootTW(dependency.head);
    double pb_stop_hTWds;
    if (isRoot) {
      pb_stop_hTWds = 0.0;
    } else {
      pb_stop_hTWds = getStopProb(dependency);
    }

    if (dependency.arg.word == STOP_WORD_INT) {
      // did we generate stop?
      return pb_stop_hTWds;
    }

    double pb_go_hTWds = 1.0 - pb_stop_hTWds;

    // generate the argument

    short binDistance = valenceBin(dependency.distance);

    // KEY:
    // c_     count of (read as joint count of first and second)
    // p_     MLE prob of (or MAP if useSmoothTagProjection)
    // pb_    MAP prob of (read as prob of first given second thing)
    // a      arg
    // h      head
    // T      tag
    // PT     projected tag
    // W      word
    // d      direction
    // ds     distance (implicit: there when direction is mentioned!)

    IntTaggedWord anyHead = new IntTaggedWord(ANY_WORD_INT, dependency.head.tag);
    IntTaggedWord anyArg = new IntTaggedWord(ANY_WORD_INT, dependency.arg.tag);
    IntTaggedWord anyTagArg = new IntTaggedWord(dependency.arg.word, ANY_TAG_INT);

    IntDependency temp =
        new IntDependency(dependency.head, dependency.arg, leftHeaded, binDistance);
    double c_aTW_hTWd = argCounter.getCount(temp);
    temp = new IntDependency(dependency.head, anyArg, leftHeaded, binDistance);
    double c_aT_hTWd = argCounter.getCount(temp);
    temp = new IntDependency(dependency.head, wildTW, leftHeaded, binDistance);
    double c_hTWd = argCounter.getCount(temp);

    temp = new IntDependency(anyHead, dependency.arg, leftHeaded, binDistance);
    double c_aTW_hTd = argCounter.getCount(temp);
    temp = new IntDependency(anyHead, anyArg, leftHeaded, binDistance);
    double c_aT_hTd = argCounter.getCount(temp);
    temp = new IntDependency(anyHead, wildTW, leftHeaded, binDistance);
    double c_hTd = argCounter.getCount(temp);

    // for smooth tag projection
    short aPT = Short.MIN_VALUE;
    double c_aPTW_hPTd = Double.NaN;
    double c_aPT_hPTd = Double.NaN;
    double c_hPTd = Double.NaN;
    double c_aPTW_aPT = Double.NaN;
    double c_aPT = Double.NaN;

    if (useSmoothTagProjection) {
      aPT = tagProject(dependency.arg.tag);
      short hPT = tagProject(dependency.head.tag);

      IntTaggedWord projectedArg = new IntTaggedWord(dependency.arg.word, aPT);
      IntTaggedWord projectedAnyHead = new IntTaggedWord(ANY_WORD_INT, hPT);
      IntTaggedWord projectedAnyArg = new IntTaggedWord(ANY_WORD_INT, aPT);

      temp = new IntDependency(projectedAnyHead, projectedArg, leftHeaded, binDistance);
      c_aPTW_hPTd = argCounter.getCount(temp);
      temp = new IntDependency(projectedAnyHead, projectedAnyArg, leftHeaded, binDistance);
      c_aPT_hPTd = argCounter.getCount(temp);
      temp = new IntDependency(projectedAnyHead, wildTW, leftHeaded, binDistance);
      c_hPTd = argCounter.getCount(temp);

      temp = new IntDependency(wildTW, projectedArg, false, ANY_DISTANCE_INT);
      c_aPTW_aPT = argCounter.getCount(temp);
      temp = new IntDependency(wildTW, projectedAnyArg, false, ANY_DISTANCE_INT);
      c_aPT = argCounter.getCount(temp);
    }

    // wild head is always directionless and no use distance
    temp = new IntDependency(wildTW, dependency.arg, false, ANY_DISTANCE_INT);
    double c_aTW = argCounter.getCount(temp);
    temp = new IntDependency(wildTW, anyArg, false, ANY_DISTANCE_INT);
    double c_aT = argCounter.getCount(temp);
    temp = new IntDependency(wildTW, anyTagArg, false, ANY_DISTANCE_INT);
    double c_aW = argCounter.getCount(temp);

    // do the Bayesian magic
    // MLE probs
    double p_aTW_hTd;
    double p_aT_hTd;
    double p_aTW_aT;
    double p_aW;
    double p_aPTW_aPT;
    double p_aPTW_hPTd;
    double p_aPT_hPTd;

    // backoffs either mle or themselves bayesian smoothed depending on useSmoothTagProjection
    if (useSmoothTagProjection) {
      if (useUnigramWordSmoothing) {
        p_aW = c_aW > 0.0 ? (c_aW / numWordTokens) : 1.0; // NEED this 1.0 for unknown words!!!
        p_aPTW_aPT = (c_aPTW_aPT + smooth_aPTW_aPT * p_aW) / (c_aPT + smooth_aPTW_aPT);
      } else {
        p_aPTW_aPT =
            c_aPTW_aPT > 0.0 ? (c_aPTW_aPT / c_aPT) : 1.0; // NEED this 1.0 for unknown words!!!
      }
      p_aTW_aT = (c_aTW + smooth_aTW_aT * p_aPTW_aPT) / (c_aT + smooth_aTW_aT);

      p_aPTW_hPTd = c_hPTd > 0.0 ? (c_aPTW_hPTd / c_hPTd) : 0.0;
      p_aTW_hTd = (c_aTW_hTd + smooth_aTW_hTd * p_aPTW_hPTd) / (c_hTd + smooth_aTW_hTd);

      p_aPT_hPTd = c_hPTd > 0.0 ? (c_aPT_hPTd / c_hPTd) : 0.0;
      p_aT_hTd = (c_aT_hTd + smooth_aT_hTd * p_aPT_hPTd) / (c_hTd + smooth_aT_hTd);
    } else {
      // here word generation isn't smoothed - can't get previously unseen word with tag.  Ugh.
      if (op.testOptions.useLexiconToScoreDependencyPwGt) {
        // We don't know the position.  Now -1 means average over 0 and 1.
        p_aTW_aT =
            dependency.leftHeaded
                ? Math.exp(lex.score(dependency.arg, 1, wordIndex.get(dependency.arg.word)))
                : Math.exp(lex.score(dependency.arg, -1, wordIndex.get(dependency.arg.word)));
        // double oldScore = c_aTW > 0.0 ? (c_aTW / c_aT) : 1.0;
        // if (oldScore == 1.0) {
        //  System.err.println("#### arg=" + dependency.arg + " score=" + p_aTW_aT +
        //                      " oldScore=" + oldScore + " c_aTW=" + c_aTW + " c_aW=" + c_aW);
        // }
      } else {
        p_aTW_aT = c_aTW > 0.0 ? (c_aTW / c_aT) : 1.0;
      }
      p_aTW_hTd = c_hTd > 0.0 ? (c_aTW_hTd / c_hTd) : 0.0;
      p_aT_hTd = c_hTd > 0.0 ? (c_aT_hTd / c_hTd) : 0.0;
    }

    double pb_aTW_hTWd = (c_aTW_hTWd + smooth_aTW_hTWd * p_aTW_hTd) / (c_hTWd + smooth_aTW_hTWd);
    double pb_aT_hTWd = (c_aT_hTWd + smooth_aT_hTWd * p_aT_hTd) / (c_hTWd + smooth_aT_hTWd);

    double score = (interp * pb_aTW_hTWd + (1.0 - interp) * p_aTW_aT * pb_aT_hTWd) * pb_go_hTWds;

    if (verbose) {
      NumberFormat nf = NumberFormat.getNumberInstance();
      nf.setMaximumFractionDigits(2);
      if (useSmoothTagProjection) {
        if (useUnigramWordSmoothing) {
          System.err.println(
              "  c_aW=" + c_aW + ", numWordTokens=" + numWordTokens + ", p(aW)=" + nf.format(p_aW));
        }
        System.err.println(
            "  c_aPTW_aPT="
                + c_aPTW_aPT
                + ", c_aPT="
                + c_aPT
                + ", smooth_aPTW_aPT="
                + smooth_aPTW_aPT
                + ", p(aPTW|aPT)="
                + nf.format(p_aPTW_aPT));
      }
      System.err.println(
          "  c_aTW="
              + c_aTW
              + ", c_aT="
              + c_aT
              + ", smooth_aTW_aT="
              + smooth_aTW_aT
              + ", ## p(aTW|aT)="
              + nf.format(p_aTW_aT));

      if (useSmoothTagProjection) {
        System.err.println(
            "  c_aPTW_hPTd="
                + c_aPTW_hPTd
                + ", c_hPTd="
                + c_hPTd
                + ", p(aPTW|hPTd)="
                + nf.format(p_aPTW_hPTd));
      }
      System.err.println(
          "  c_aTW_hTd="
              + c_aTW_hTd
              + ", c_hTd="
              + c_hTd
              + ", smooth_aTW_hTd="
              + smooth_aTW_hTd
              + ", p(aTW|hTd)="
              + nf.format(p_aTW_hTd));

      if (useSmoothTagProjection) {
        System.err.println(
            "  c_aPT_hPTd="
                + c_aPT_hPTd
                + ", c_hPTd="
                + c_hPTd
                + ", p(aPT|hPTd)="
                + nf.format(p_aPT_hPTd));
      }
      System.err.println(
          "  c_aT_hTd="
              + c_aT_hTd
              + ", c_hTd="
              + c_hTd
              + ", smooth_aT_hTd="
              + smooth_aT_hTd
              + ", p(aT|hTd)="
              + nf.format(p_aT_hTd));

      System.err.println(
          "  c_aTW_hTWd="
              + c_aTW_hTWd
              + ", c_hTWd="
              + c_hTWd
              + ", smooth_aTW_hTWd="
              + smooth_aTW_hTWd
              + ", ## p(aTW|hTWd)="
              + nf.format(pb_aTW_hTWd));
      System.err.println(
          "  c_aT_hTWd="
              + c_aT_hTWd
              + ", c_hTWd="
              + c_hTWd
              + ", smooth_aT_hTWd="
              + smooth_aT_hTWd
              + ", ## p(aT|hTWd)="
              + nf.format(pb_aT_hTWd));

      System.err.println(
          "  interp="
              + interp
              + ", prescore="
              + nf.format(interp * pb_aTW_hTWd + (1.0 - interp) * p_aTW_aT * pb_aT_hTWd)
              + ", P(go|hTWds)="
              + nf.format(pb_go_hTWds)
              + ", score="
              + nf.format(score));
    }

    if (op.testOptions.prunePunc && pruneTW(aTW)) {
      return 1.0;
    }

    if (Double.isNaN(score)) {
      score = 0.0;
    }

    // if (op.testOptions.rightBonus && ! dependency.leftHeaded)
    //  score -= 0.2;

    if (score < MIN_PROBABILITY) {
      score = 0.0;
    }

    return score;
  }
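Every pb_ quantity in probTB above follows the same MAP-smoothing pattern: interpolate a joint count against a backoff probability, pb = (c_joint + smooth * p_backoff) / (c_context + smooth). A tiny worked example with invented numbers:

public class SmoothDemo {
  public static void main(String[] args) {
    double cJoint = 3.0;    // count of (arg, head, direction); illustrative
    double cContext = 10.0; // count of the conditioning context; illustrative
    double pBackoff = 0.05; // probability from the coarser backoff model
    double smooth = 16.0;   // smoothing strength (the role of smooth_aTW_hTWd)
    double pb = (cJoint + smooth * pBackoff) / (cContext + smooth);
    System.out.println(pb); // (3 + 0.8) / 26, about 0.1462
  }
}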
Code Example #30
File: Neural.java Project: rubencodes/neuralnet
  public static Vector<Double> trainNode(
      String filename,
      int outputRepresentation,
      String inputRepresentation,
      int filter,
      double learningRate,
      int epochs) {
    Vector<Double> node = null; // initialize node to null
    double error = 1; // initialize error
    double output = 0; // initialize output
    Vector<Double> trainError = new Vector<Double>();
    double meansq = 0;
    String binTarget = "";
    String binaryRep = "";
    int target;
    openFile(filename); // open our file for training
    firstRead = true;
    int targetCounter = 0;
    while ((target = readFileIntoInputVector(inputRepresentation))
        != -2) { // while we have not reached our end-of-file marker
      if (outputRepresentation == 4) {
        binTarget = Integer.toBinaryString(target); // convert target to binary
        int[] targetArray = new int[4]; // array to hold binary ints
        for (int h = 0; h < targetArray.length; h++) { // add binary elements to int array
          try {
            targetArray[targetArray.length - h - 1] =
                Character.digit(binTarget.charAt(binTarget.length() - h - 1), 10);
          } catch (IndexOutOfBoundsException e) { // if there is empty spaces, fill with zeros
            targetArray[targetArray.length - h - 1] = 0;
          }
        }
        target = targetArray[filter];
        binTarget = "";
        for (int i = 0; i < targetArray.length; i++) {
          binTarget = binTarget + targetArray[i];
        }
      }

      if ((filter == target && outputRepresentation == 10) || outputRepresentation != 10) {
        if (node == null) {
          node = new Vector<Double>();
          for (int i = 0; i < inputs.size(); i++) { // for each input
            double randWeight = 1; // initialize random weight
            while (randWeight > 0.15) { // redraw until our random weight is at most 0.15
              randWeight = random.nextDouble();
            }
            if (random.nextBoolean()) randWeight = randWeight * -1; // randomly set to negative
            node.addElement(Double.valueOf(randWeight)); // store random weight
          }
        }
        targetCounter++;
        // train for number of epochs
        for (int i = 0; i < epochs; i++) { // for each epoch
          double weightedSum = 0; // set/reset the weighted sum to 0
          for (int j = 0; j < inputs.size(); j++) {
            weightedSum += (inputs.elementAt(j) * node.elementAt(j)); // calculate weighted sum
          }
          output = activation(weightedSum); // retrieve output
          if (outputRepresentation == 1 || outputRepresentation == 10) output = output * 10;
          else if (outputRepresentation == 4) { // each node must have a whole number
            if (output > 0.5) output = 1;
            else output = 0;
          }

          error = target - output; // calculate error

          if (i == epochs - 1) {
            if (outputRepresentation == 4) {
              if (filter == 0) {
                allOutputs.ensureCapacity(targetCounter);
                allOutputs.add(Integer.toString((int) output));
              } else {
                allOutputs.set(
                    targetCounter - 1, allOutputs.get(targetCounter - 1) + ((int) output));
              }
            }
            trainError.add(error);
            if (Math.round(error) == 0 && outputRepresentation != 4) {
              trainCountCorrect++;
            } else if (outputRepresentation == 4 && filter == 3) {
              if (allOutputs.get(targetCounter - 1).equals(binTarget)) {
                trainCountCorrect++;
              }
            }
          }

          for (int k = 0; k < inputs.size(); k++) { // for each input
            double derivative =
                (1.64872 * Math.exp(weightedSum))
                    / (Math.pow(1.64872 + Math.exp(weightedSum), 2)); // calculate new weights
            double newWeight =
                node.elementAt(k)
                    + (learningRate * inputs.elementAt(k) * error * derivative); // cont.
            node.setElementAt(newWeight, k); // update weights
          }
        }
      }
      inputs.clear();
    }
    errorVect.addElement((Vector<Double>) trainError.clone());
    trainError.clear();
    if (outputRepresentation == 4 && filter == 3)
      System.out.println(
          "Training Percent: " + (100 * (double) trainCountCorrect / (double) targetCounter));
    if (outputRepresentation == 10 && filter == 9)
      System.out.println(
          "Training Percent: " + (10 * (double) trainCountCorrect / (double) targetCounter));
    if (outputRepresentation == 1)
      System.out.println(
          "Training Percent: " + (100 * (double) trainCountCorrect / (double) targetCounter));
    closeFile();
    return node; // node is now trained for an epoch
  }