Esempio n. 1
0
 /**
  * Converts source parameters to natural parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = (p_1, \cdots, p_k)\f$
  * @return natural parameters \f$ \mathbf{\Theta} = \left( \log \left( \frac{p_i}{p_k} \right)
  *     \right)_i \f$
  */
 public PVector Lambda2Theta(PVector L) {
   int k = L.getDimension();
   // The natural parametrization has k-1 coordinates; p_k is the reference category.
   PVector natural = new PVector(k - 1);
   natural.type = Parameter.TYPE.NATURAL_PARAMETER;
   double pk = L.array[k - 1];
   for (int i = 0; i < k - 1; i++) {
     natural.array[i] = Math.log(L.array[i] / pk);
   }
   return natural;
 }
 /**
  * Computes the density value \f$ f(x;\mu) \f$.
  *
  * @param x point
  * @param param parameters (source, natural, or expectation)
  * @return \f$ f(x;\mu) = \frac{1}{ (2\pi)^{d/2} } \exp \left( - \frac{(x-\mu)^T (x-\mu)}{2}
  *     \right) \mbox{ for } x \in \mathds{R}^d \f$
  */
 public double density(PVector x, PVector param) {
   if (param.type == Parameter.TYPE.SOURCE_PARAMETER) {
     // Closed form in source coordinates: param is the mean vector.
     PVector diff = x.Minus(param);
     double sqDist = diff.InnerProduct(diff);
     double normalizer = Math.pow(2.0d * Math.PI, (double) x.dim / 2.0d);
     return Math.exp(-0.5d * sqDist) / normalizer;
   } else if (param.type == Parameter.TYPE.NATURAL_PARAMETER) {
     // Generic exponential-family density handles natural parameters.
     return super.density(x, param);
   } else {
     // Expectation parameters: convert to natural coordinates first.
     return super.density(x, Eta2Theta(param));
   }
 }
Esempio n. 3
0
 /**
  * Converts expectation parameters to source parameters.
  *
  * @param H expectation parameters \f$ \mathbf{H} = (\eta_1, \cdots, \eta_{k-1})\f$
  * @return source parameters \f$ \mathbf{\Lambda} = \begin{cases} p_i = \frac{\eta_i}{n} &
  *     \mbox{if $i<k$}\\ p_k = \frac{n - \sum_{j=1}^{k-1} \eta_j}{n} \end{cases}\f$
  */
 public PVector Eta2Lambda(PVector H) {
   int dim = H.getDimension();
   PVector lambda = new PVector(dim + 1);
   lambda.type = Parameter.TYPE.SOURCE_PARAMETER;
   // First k-1 probabilities are eta_i / n; accumulate the total for the last one.
   double total = 0;
   for (int i = 0; i < dim; i++) {
     lambda.array[i] = H.array[i] / n;
     total += H.array[i];
   }
   // The probabilities sum to one, so p_k absorbs the remaining mass.
   lambda.array[dim] = (n - total) / n;
   return lambda;
 }
Esempio n. 4
0
  /**
   * Computes \f$ \nabla G (\mathbf{H})\f$.
   *
   * @param H expectation parameters \f$ \mathbf{H} = (\eta_1, \cdots, \eta_{k-1}) \f$
   * @return \f$ \nabla G( \mathbf{H} ) = \left( \log \left( \frac{\eta_i}{n - \sum_{j=1}^{k-1}
   *     \eta_j} \right) \right)_i \f$
   */
  public PVector gradG(PVector H) {
    int dim = H.getDimension();

    // Remaining mass n - sum(eta_j) is the shared denominator of every component.
    double total = 0;
    for (int i = 0; i < dim; i++) {
      total += H.array[i];
    }

    PVector grad = new PVector(dim);
    grad.type = Parameter.TYPE.NATURAL_PARAMETER;
    for (int i = 0; i < dim; i++) {
      grad.array[i] = Math.log(H.array[i] / (n - total));
    }
    return grad;
  }
Esempio n. 5
0
  /**
   * Computes \f$ \nabla F ( \mathbf{\Theta} )\f$.
   *
   * @param T natural parameters \f$ \mathbf{\Theta} = (\theta_1, \cdots, \theta_{k-1}) \f$
   * @return \f$ \nabla F( \mathbf{\Theta} ) = \left( \frac{n \exp \theta_i}{1 + \sum_{j=1}^{k-1}
   *     \exp \theta_j} \right)_i \f$
   */
  public PVector gradF(PVector T) {
    int dim = T.getDimension();

    // Shared denominator term: sum of exp(theta_j) over all components.
    double expSum = 0;
    for (int i = 0; i < dim; i++) {
      expSum += Math.exp(T.array[i]);
    }

    PVector grad = new PVector(dim);
    grad.type = Parameter.TYPE.EXPECTATION_PARAMETER;
    for (int i = 0; i < dim; i++) {
      grad.array[i] = (n * Math.exp(T.array[i])) / (1 + expSum);
    }
    return grad;
  }
Esempio n. 6
0
 /**
  * Computes \f$ G(\mathbf{H})\f$.
  *
  * @param H expectation parameters \f$ \mathbf{H} = (\eta_1, \cdots, \eta_{k-1}) \f$
  * @return \f$ G(\mathbf{H}) = \left( \sum_{i=1}^{k-1} \eta_i \log \eta_i \right) + \left( n -
  *     \sum_{i=1}^{k-1} \eta_i \right) \log \left( n - \sum_{i=1}^{k-1} \eta_i \right) \f$
  */
 public double G(PVector H) {
   // Accumulate sum(eta_i log eta_i) and sum(eta_i) in a single pass.
   double entropyTerm = 0;
   double etaSum = 0;
   for (int i = 0; i < H.getDimension(); i++) {
     double eta = H.array[i];
     entropyTerm += eta * Math.log(eta);
     etaSum += eta;
   }
   // Remaining mass contributes its own x log x term.
   return entropyTerm + (n - etaSum) * Math.log(n - etaSum);
 }
Esempio n. 7
0
 /**
  * Computes the density value \f$ f(x) \f$.
  *
  * @param x point
  * @param param parameters (source, natural, or expectation)
  * @return \f$ f(x_1,\cdots,x_k;p_1,\cdots,p_k,n) = \frac{n!}{x_1! \cdots x_k!} p_1^{x_1} \cdots
  *     p_k^{x_k} \f$
  */
 public double density(PVector x, PVector param) {
   if (param.type == Parameter.TYPE.SOURCE_PARAMETER) {
     // Closed form: n! / (x_1! ... x_k!) * prod p_i^{x_i}.
     double factProduct = 1;
     double probProduct = 1;
     for (int i = 0; i < param.getDimension(); i++) {
       factProduct *= fact(x.array[i]);
       probProduct *= Math.pow(param.array[i], x.array[i]);
     }
     return (fact(n) * probProduct) / factProduct;
   } else if (param.type == Parameter.TYPE.NATURAL_PARAMETER) {
     // Generic exponential-family density handles natural parameters.
     return super.density(x, param);
   } else {
     // Expectation parameters: convert to natural coordinates first.
     return super.density(x, Eta2Theta(param));
   }
 }
Esempio n. 8
0
  /**
   * Converts natural parameters to source parameters.
   *
   * @param T natural parameters \f$ \mathbf{\Theta} = ( \theta_1, \cdots, \theta_{k-1} )\f$
   * @return source parameters \f$ \mathbf{\Lambda} = \begin{cases} p_i = \frac{\exp \theta_i}{1 +
   *     \sum_{j=1}^{k-1}(\exp \theta_j)} & \mbox{if $i<k$}\\ p_k = \frac{1}{1 +
   *     \sum_{j=1}^{k-1}(\exp \theta_j)} \end{cases} \f$
   */
  public PVector Theta2Lambda(PVector T) {
    int dim = T.getDimension();

    // Normalizer denominator: 1 + sum(exp theta_j).
    double expSum = 0;
    for (int i = 0; i < dim; i++) {
      expSum += Math.exp(T.array[i]);
    }

    PVector lambda = new PVector(dim + 1);
    lambda.type = Parameter.TYPE.SOURCE_PARAMETER;
    for (int i = 0; i < dim; i++) {
      lambda.array[i] = Math.exp(T.array[i]) / (1.0 + expSum);
    }
    // Reference category gets the leftover probability mass.
    lambda.array[dim] = 1.0 / (1.0 + expSum);

    return lambda;
  }
 /**
  * Computes \f$ G(\mathbf{H})\f$.
  *
  * @param H expectation parameters \f$ \mathbf{H} = \eta \f$
  * @return \f$ G(\mathbf{H}) = \frac{1}{2} \eta^\top\eta + \frac{d}{2}\log 2\pi \f$
  */
 public double G(PVector H) {
   double quadratic = H.InnerProduct(H);
   double logTerm = H.dim * Math.log(2 * Math.PI);
   return 0.5d * (quadratic + logTerm);
 }
Esempio n. 10
0
 /**
  * Computes \f$ \nabla F ( \mathbf{\Theta} )\f$.
  *
  * @param T natural \f$ \mathbf{\Theta} = \theta \f$
  * @return \f$ \nabla F( \mathbf{\Theta} ) = \theta \f$
  */
 public PVector gradF(PVector T) {
   // The gradient of F is the identity map here; only the parameter type changes.
   PVector eta = (PVector) T.clone();
   eta.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   return eta;
 }
Esempio n. 11
0
 /**
  * Computes the log normalizer \f$ F( \mathbf{\Theta} ) \f$.
  *
  * @param T natural parameters \f$ \mathbf{\Theta} = \theta \f$
  * @return \f$ F(\mathbf{\theta}) = \frac{1}{2} \theta^\top\theta + \frac{d}{2}\log 2\pi \f$
  */
 public double F(PVector T) {
   double quadratic = T.InnerProduct(T);
   double constant = T.dim * Math.log(2 * Math.PI);
   return 0.5d * (quadratic + constant);
 }
Esempio n. 12
0
 /**
  * Converts expectation parameters to source parameters.
  *
  * @param H expectation parameters \f$ \mathbf{H} = \eta\f$
  * @return source parameters \f$ \mathbf{\Lambda} = \eta \f$
  */
 public PVector Eta2Lambda(PVector H) {
   // One-dimensional identity conversion; only the parameter type differs.
   PVector lambda = new PVector(1);
   lambda.type = Parameter.TYPE.SOURCE_PARAMETER;
   lambda.array[0] = H.array[0];
   return lambda;
 }
Esempio n. 13
0
 /**
  * Converts natural parameters to source parameters.
  *
  * @param T natural parameters \f$ \mathbf{\Theta} = \theta \f$
  * @return source parameters \f$ \mathbf{\Lambda} = \frac{\exp\theta}{1+\exp\theta} \f$
  */
 public PVector Theta2Lambda(PVector T) {
   PVector L = new PVector(1);
   // Numerically stable logistic: the naive exp(t)/(1+exp(t)) overflows to
   // Infinity/Infinity = NaN for large t (> ~709). Branching on the sign keeps
   // the argument of exp non-positive, so the result saturates correctly at 1.
   double t = T.array[0];
   if (t >= 0) {
     L.array[0] = 1.0 / (1.0 + Math.exp(-t));
   } else {
     double e = Math.exp(t);
     L.array[0] = e / (1.0 + e);
   }
   L.type = Parameter.TYPE.SOURCE_PARAMETER;
   return L;
 }
Esempio n. 14
0
 /**
  * Computes the carrier measure \f$ k(x) \f$.
  *
  * @param x a point
  * @return \f$ k(x) = -\frac{1}{2}x^\top x \f$
  */
 public double k(PVector x) {
   double squaredNorm = x.InnerProduct(x);
   return -0.5d * squaredNorm;
 }
Esempio n. 15
0
 /**
  * Converts source parameters to expectation parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = \mu \f$
  * @return expectation parameters \f$ \mathbf{H} = \mu \f$
  */
 public PVector Lambda2Eta(PVector L) {
   // Identity conversion: the expectation parameter is the mean itself.
   PVector eta = (PVector) L.clone();
   eta.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   return eta;
 }
Esempio n. 16
0
 /**
  * Converts source parameters to natural parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = \mu \f$
  * @return natural parameters \f$ \mathbf{\Theta} = \mu \f$
  */
 public PVector Lambda2Theta(PVector L) {
   // Identity conversion: only the parameter type tag changes.
   PVector natural = (PVector) L.clone();
   natural.type = Parameter.TYPE.NATURAL_PARAMETER;
   return natural;
 }
Esempio n. 17
0
 /**
  * Computes \f$ F( \mathbf{\Theta} ) \f$.
  *
  * @param T parameters \f$ \mathbf{\Theta} = (\theta_1, \cdots, \theta_{k-1}) \f$
  * @return \f$ F(\mathbf{\Theta}) = n \log \left( 1 + \sum_{i=1}^{k-1} \exp \theta_i \right) -
  *     \log n! \f$
  */
 public double F(PVector T) {
   double expSum = 0;
   for (int i = 0; i < T.getDimension(); i++) {
     expSum += Math.exp(T.array[i]);
   }
   return n * Math.log(1 + expSum) - Math.log(fact(n));
 }
Esempio n. 18
0
 /**
  * Computes the Kullback-Leibler divergence between two Multinomial distributions
  * (k categories, n trials).
  *
  * @param LA source parameters \f$ \mathbf{\Lambda}_\alpha \f$
  * @param LB source parameters \f$ \mathbf{\Lambda}_\beta \f$
  * @return \f$ D_{\mathrm{KL}}(f_1\|f_2) = n p_{\alpha,k} \log \frac{p_{\alpha,k}}{p_{\beta,k}} -
  *     n \sum_{i=1}^{k-1} p_{\alpha,i} \log \frac{p_{\beta,i}}{p_{\alpha,i}} \f$
  */
 public double KLD(PVector LA, PVector LB) {
   int last = LA.getDimension() - 1;
   // Cross term over the first k-1 categories.
   double crossSum = 0;
   for (int i = 0; i < last; i++) {
     crossSum += LA.array[i] * Math.log(LB.array[i] / LA.array[i]);
   }
   // The last category is handled separately with the opposite log ratio.
   return n * LA.array[last] * Math.log(LA.array[last] / LB.array[last]) - n * crossSum;
 }
Esempio n. 19
0
 /**
  * Computes \f$ \nabla G (\mathbf{H})\f$.
  *
  * @param H expectation parameters \f$ \mathbf{H} = \eta \f$
  * @return \f$ \nabla G(\mathbf{H}) = \eta \f$
  */
 public PVector gradG(PVector H) {
   // The gradient of G is the identity map here; only the parameter type changes.
   PVector natural = (PVector) H.clone();
   natural.type = Parameter.TYPE.NATURAL_PARAMETER;
   return natural;
 }
Esempio n. 20
0
 /**
  * Converts expectation parameters to source parameters.
  *
  * @param H expectation parameters \f$ \mathbf{H} = \eta \f$
  * @return source parameters \f$ \mathbf{\Lambda} = \eta \f$
  */
 public PVector Eta2Lambda(PVector H) {
   // Identity conversion: only the parameter type tag changes.
   PVector lambda = (PVector) H.clone();
   lambda.type = Parameter.TYPE.SOURCE_PARAMETER;
   return lambda;
 }
Esempio n. 21
0
 /**
  * Computes the sufficient statistic \f$ t(x)\f$.
  *
  * @param x a point
  * @return \f$ t(x) = x \f$
  */
 public PVector t(PVector x) {
   // The sufficient statistic is the point itself, tagged as expectation-type.
   PVector statistic = (PVector) x.clone();
   statistic.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   return statistic;
 }
Esempio n. 22
0
 /**
  * Computes \f$ \nabla G (\mathbf{H})\f$.
  *
  * @param H expectation parameters \f$ \mathbf{H} = \eta \f$
  * @return \f$ \nabla G( \mathbf{H} ) = \log \left( \frac{\eta}{1-\eta} \right) \f$
  */
 public PVector gradG(PVector H) {
   PVector natural = new PVector(1);
   natural.type = Parameter.TYPE.NATURAL_PARAMETER;
   // Logit of the expectation parameter.
   double eta = H.array[0];
   natural.array[0] = Math.log(eta / (1 - eta));
   return natural;
 }
Esempio n. 23
0
 /**
  * Converts source parameters to natural parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = p \f$
  * @return natural parameters \f$ \mathbf{\Theta} = \log \left( \frac{p}{1-p} \right) \f$
  */
 public PVector Lambda2Theta(PVector L) {
   PVector natural = new PVector(1);
   natural.type = Parameter.TYPE.NATURAL_PARAMETER;
   // Logit of the success probability.
   double p = L.array[0];
   natural.array[0] = Math.log(p / (1 - p));
   return natural;
 }
Esempio n. 24
0
 /**
  * Computes the carrier measure \f$ k(x) \f$.
  *
  * @param x a point
  * @return \f$ k(x) = - \sum_{i=1}^{k} \log x_i ! \f$
  */
 public double k(PVector x) {
   // Accumulate the log-factorials, then negate.
   double logFactSum = 0;
   for (int i = 0; i < x.getDimension(); i++) {
     logFactSum += Math.log(fact(x.array[i]));
   }
   return -logFactSum;
 }
Esempio n. 25
0
 /**
  * Converts source parameters to expectation parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = p \f$
  * @return expectation parameters \f$ \mathbf{H} = p \f$
  */
 public PVector Lambda2Eta(PVector L) {
   // One-dimensional identity conversion; only the parameter type differs.
   PVector eta = new PVector(1);
   eta.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   eta.array[0] = L.array[0];
   return eta;
 }
Esempio n. 26
0
 /**
  * Computes the Kullback-Leibler divergence between two multivariate isotropic Gaussian
  * distributions.
  *
  * @param LP source parameters \f$ \mathbf{\Lambda}_P \f$
  * @param LQ source parameters \f$ \mathbf{\Lambda}_Q \f$
  * @return \f$ D_{\mathrm{KL}}(f_P \| f_Q) = \frac{1}{2} ( \mu_Q - \mu_P )^\top( \mu_Q - \mu_P )
  *     \f$
  */
 public double KLD(PVector LP, PVector LQ) {
   // Half the squared Euclidean distance between the two means.
   PVector delta = LQ.Minus(LP);
   return 0.5d * delta.InnerProduct(delta);
 }
Esempio n. 27
0
 /**
  * Computes \f$ \nabla F ( \mathbf{\Theta} )\f$.
  *
  * @param T natural parameters \f$ \mathbf{\Theta} = \theta \f$
  * @return \f$ \nabla F( \mathbf{\Theta} ) = \frac{\exp \theta}{1 + \exp \theta} \f$
  */
 public PVector gradF(PVector T) {
   PVector gradient = new PVector(1);
   // Numerically stable logistic: the naive exp(t)/(1+exp(t)) overflows to
   // Infinity/Infinity = NaN for large t (> ~709). Branching on the sign keeps
   // the argument of exp non-positive, so the result saturates correctly at 1.
   // This also evaluates Math.exp only once per call.
   double t = T.array[0];
   if (t >= 0) {
     gradient.array[0] = 1.0 / (1.0 + Math.exp(-t));
   } else {
     double e = Math.exp(t);
     gradient.array[0] = e / (1.0 + e);
   }
   gradient.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   return gradient;
 }
Esempio n. 28
0
 /**
  * Converts source parameters to expectation parameters.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = ( p_1, \cdots, p_k )\f$
  * @return expectation parameters \f$ \mathbf{H} = \left( n p_i \right)_i\f$
  */
 public PVector Lambda2Eta(PVector L) {
   // The expectation parametrization drops the redundant last probability.
   int dim = L.getDimension() - 1;
   PVector eta = new PVector(dim);
   eta.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   for (int i = 0; i < dim; i++) {
     eta.array[i] = n * L.array[i];
   }
   return eta;
 }
Esempio n. 29
0
 /**
  * Computes the sufficient statistic \f$ t(x)\f$.
  *
  * @param x a point
  * @return \f$ t(x) = x \f$
  */
 public PVector t(PVector x) {
   // One-dimensional identity statistic, tagged as expectation-type.
   PVector statistic = new PVector(1);
   statistic.type = Parameter.TYPE.EXPECTATION_PARAMETER;
   statistic.array[0] = x.array[0];
   return statistic;
 }
Esempio n. 30
0
 /**
  * Draws a point from the considered distribution.
  *
  * @param L source parameters \f$ \mathbf{\Lambda} = \mu \f$
  * @return a point.
  */
 public PVector drawRandomPoint(PVector L) {
   // NOTE(review): a fresh Random is created on every call, as in the original;
   // a shared instance would be preferable for repeated draws, but that would
   // require state outside this method.
   Random generator = new Random();
   int dim = L.getDimension();
   PVector sample = new PVector(dim);
   // Shift a standard Gaussian draw by the mean in each coordinate.
   for (int i = 0; i < dim; i++) {
     sample.array[i] = L.array[i] + generator.nextGaussian();
   }
   return sample;
 }