Example #1
0
 /**
  * Computes the SVM output for the training instance at the given index in m_data.
  * Uses the kernel cache (by passing the training-set index to the kernel), unlike
  * SVMOutput(Instance).
  *
  * @param index index of the training instance in m_data
  * @return the SVM output
  * @throws Exception if the kernel evaluation fails
  */
 protected double SVMOutput(int index) throws Exception {
   double output = -m_b;
   // Walk the set of support vectors; getNext(-1) yields the first element and
   // getNext returns -1 once the set is exhausted.
   int sv = m_supportVectors.getNext(-1);
   while (sv != -1) {
     output += (m_alpha[sv] - m_alphaStar[sv]) * m_kernel.eval(index, sv, m_data.instance(index));
     sv = m_supportVectors.getNext(sv);
   }
   return output;
 }
Example #2
0
 /**
  * Computes the value of the dual objective function for the epsilon-insensitive loss:
  *
  *   -0.5 * sum_i sum_j (alpha_i - alpha*_i)(alpha_j - alpha*_j) K(i, j)
  *   + sum_i [ target_i * (alpha_i - alpha*_i) - epsilon * (alpha_i + alpha*_i) ]
  *
  * Note: this is O(n^2) kernel evaluations over the full training set.
  *
  * @return the objective function value (the score)
  * @throws Exception if a kernel evaluation fails
  */
 protected double getScore() throws Exception {
   double quadTerm = 0; // -0.5 * quadratic kernel term is applied at the end
   double linTerm = 0; // linear term of the epsilon-insensitive loss
   for (int i = 0; i < m_nInstances; i++) {
     // Hoist the i-dependent factor out of the inner loop; it is loop-invariant there.
     double diffI = m_alpha[i] - m_alphaStar[i];
     for (int j = 0; j < m_nInstances; j++) {
       quadTerm += diffI * (m_alpha[j] - m_alphaStar[j]) * m_kernel.eval(i, j, m_data.instance(i));
     }
     linTerm += m_target[i] * diffI - m_epsilon * (m_alpha[i] + m_alphaStar[i]);
   }
   return -0.5 * quadTerm + linTerm;
 }
Example #3
0
  /**
   * Computes the SVM output for the given instance (which need not be part of the
   * training set; the kernel is evaluated with index -1 for it).
   *
   * @param inst the instance to compute the output for
   * @return the SVM output
   * @throws Exception if the kernel evaluation fails
   */
  public double SVMOutput(Instance inst) throws Exception {

    double result = -m_b;
    // Is the machine linear?
    if (m_weights != null) {
      // The weight vector is dense but the instance may be sparse: iterate over the
      // instance's stored (non-zero) values, NOT the weight array. Iterating up to
      // m_weights.length would index past the values actually present in a sparse
      // instance (inst.index(i)/inst.valueSparse(i) are only valid for i < numValues()).
      for (int i = 0; i < inst.numValues(); i++) {
        if (inst.index(i) != m_classIndex) {
          result += m_weights[inst.index(i)] * inst.valueSparse(i);
        }
      }
    } else {
      // Non-linear machine: sum kernel evaluations over the support vectors only.
      for (int i = m_supportVectors.getNext(-1); i != -1; i = m_supportVectors.getNext(i)) {
        result += (m_alpha[i] - m_alphaStar[i]) * m_kernel.eval(-1, i, inst);
      }
    }
    return result;
  }
  /**
   * Builds a model using the current Kernel using the given data and returns the produced output.
   *
   * @param data the instances to test the Kernel on
   * @return a String containing one "n-i: value" line per instance pair (i &gt;= n)
   * @throws Exception if building or evaluating the kernel fails
   */
  protected String useKernel(Instances data) throws Exception {
    Kernel kernel = null;
    // StringBuilder over StringBuffer: local, single-threaded use needs no synchronization.
    StringBuilder text = new StringBuilder();

    try {
      kernel = Kernel.makeCopy(m_Kernel);
    } catch (Exception e) {
      e.printStackTrace();
      // fail() throws, so kernel cannot still be null when buildKernel is reached.
      fail("Problem setting up to use Kernel: " + e);
    }

    kernel.buildKernel(data);
    // Evaluate every unordered pair exactly once (upper triangle including the diagonal).
    for (int n = 0; n < data.numInstances(); n++) {
      for (int i = n; i < data.numInstances(); i++) {
        text.append(n + 1)
            .append("-")
            .append(i + 1)
            .append(": ")
            .append(kernel.eval(n, i, data.instance(i)))
            .append("\n");
      }
    }

    return text.toString();
  }
 /**
  * Evaluates the KernelBasisFunction at the given point.
  *
  * <p>Delegates to the wrapped kernel, evaluating it against the stored point {@code y}.
  * NOTE(review): presumably {@code x.length} must match the dimensionality of {@code y} —
  * confirm at the call sites; no check is performed here.
  *
  * @param x the point to evaluate the basis function at
  * @return the kernel value for (x, y)
  */
 public double eval(double[] x) {
   return kernel.eval(x, y);
 }