/**
 * Initializes the algorithm.
 *
 * @param data the data to work with
 * @throws Exception if m_SVM is null
 */
protected void init(Instances data) throws Exception {
  if (m_SVM == null) {
    throw new Exception("SVM not initialized in optimizer. Use RegOptimizer.setSVMReg()");
  }

  m_C = m_SVM.getC();
  m_data = data;
  m_classIndex = data.classIndex();
  m_nInstances = data.numInstances();

  // initialize kernel
  m_kernel = Kernel.makeCopy(m_SVM.getKernel());
  m_kernel.buildKernel(data);

  // init m_target
  m_target = new double[m_nInstances];
  for (int i = 0; i < m_nInstances; i++) {
    m_target[i] = data.instance(i).classValue();
  }

  m_random = new Random(m_nSeed);

  // initialize alpha and alpha* arrays to all zero
  m_alpha = new double[m_target.length];
  m_alphaStar = new double[m_target.length];

  m_supportVectors = new SMOset(m_nInstances);

  m_b = 0.0;
  m_nEvals = 0;
  m_nCacheHits = -1;
}
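// Usage sketch (illustrative, not part of the original class; assumes a
// concrete subclass such as RegSMO driven by SMOreg, as the error message
// above suggests):
//
//   RegOptimizer optimizer = new RegSMO(); // any concrete RegOptimizer
//   optimizer.setSVMReg(smoReg);           // must precede init(), or init() throws
//   optimizer.init(trainingData);          // copies the kernel, caches the
//                                          // targets, zeroes alpha / alpha*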
/**
 * Wraps up various variables to save memory and does some housekeeping after
 * optimization has finished.
 *
 * @throws Exception if something goes wrong
 */
protected void wrapUp() throws Exception {
  m_target = null;

  m_nEvals = m_kernel.numEvals();
  m_nCacheHits = m_kernel.numCacheHits();

  if ((m_SVM.getKernel() instanceof PolyKernel)
    && ((PolyKernel) m_SVM.getKernel()).getExponent() == 1.0) {
    // linear kernel: convert the alphas to explicit attribute weights
    double[] weights = new double[m_data.numAttributes()];
    for (int k = m_supportVectors.getNext(-1); k != -1; k = m_supportVectors.getNext(k)) {
      for (int j = 0; j < weights.length; j++) {
        if (j != m_classIndex) {
          weights[j] += (m_alpha[k] - m_alphaStar[k]) * m_data.instance(k).value(j);
        }
      }
    }
    m_weights = weights;

    // release memory
    m_alpha = null;
    m_alphaStar = null;
    m_kernel = null;
  }
  m_bModelBuilt = true;
}
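// Why the weight folding above is valid (sketch; assumes a PolyKernel with
// exponent 1.0 reduces to a plain dot product):
//
//   f(x) = sum_i (alpha_i - alphaStar_i) * <x_i, x> (plus the bias term)
//        = < sum_i (alpha_i - alphaStar_i) * x_i, x > (plus the bias term)
//        = <m_weights, x> (plus the bias term)
//
// so once m_weights is stored, the alphas, the kernel, and its cache can all
// be freed without changing the model's predictions.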
/**
 * Prints out the classifier.
 *
 * @return a description of the classifier as a string
 */
@Override
public String toString() {
  StringBuffer text = new StringBuffer();
  text.append("SMOreg\n\n");

  if (m_weights != null) {
    // it's a linear machine: print the folded weights instead of support vectors
    text.append("weights (not support vectors):\n");
    for (int i = 0; i < m_data.numAttributes(); i++) {
      if (i != m_classIndex) {
        text.append((m_weights[i] >= 0 ? " + " : " - ")
          + Utils.doubleToString(Math.abs(m_weights[i]), 12, 4) + " * ");
        if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_STANDARDIZE) {
          text.append("(standardized) ");
        } else if (m_SVM.getFilterType().getSelectedTag().getID() == SMOreg.FILTER_NORMALIZE) {
          text.append("(normalized) ");
        }
        text.append(m_data.attribute(i).name() + "\n");
      }
    }
  } else {
    // non-linear: print out all support vectors
    text.append("Support vectors:\n");
    for (int i = 0; i < m_nInstances; i++) {
      if (m_alpha[i] > 0) {
        text.append("+" + m_alpha[i] + " * k[" + i + "]\n");
      }
      if (m_alphaStar[i] > 0) {
        text.append("-" + m_alphaStar[i] + " * k[" + i + "]\n");
      }
    }
  }

  text.append((m_b <= 0 ? " + " : " - ")
    + Utils.doubleToString(Math.abs(m_b), 12, 4) + "\n\n");

  text.append("\n\nNumber of kernel evaluations: " + m_nEvals);
  if (m_nCacheHits >= 0 && m_nEvals > 0) {
    double hitRatio = 1 - m_nEvals * 1.0 / (m_nCacheHits + m_nEvals);
    text.append(" (" + Utils.doubleToString(hitRatio * 100, 7, 3).trim() + "% cached)");
  }

  return text.toString();
}
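// Shape of the resulting description for a linear model (attribute names and
// values purely illustrative):
//
//   SMOreg
//
//   weights (not support vectors):
//    +       0.5312 * (normalized) petallength
//    -       0.0764 * (normalized) sepalwidth
//    +       0.0193
//
//   Number of kernel evaluations: 4016 (61.397% cached)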