@Override
 public void addResult(Instance inst, double[] classVotes) {
   double weight = inst.weight();
   int trueClass = (int) inst.classValue();
   if (weight > 0.0) {
     if (TotalweightObserved == 0) {
       reset(inst.dataset().numClasses());
     }
     this.TotalweightObserved += weight;
     this.weightObserved.add(weight);
     int predictedClass = Utils.maxIndex(classVotes);
     if (predictedClass == trueClass) {
       this.weightCorrect.add(weight);
     } else {
       this.weightCorrect.add(0);
     }
     // Add Kappa statistic information
     for (int i = 0; i < this.numClasses; i++) {
       this.rowKappa[i].add(i == predictedClass ? weight : 0);
       this.columnKappa[i].add(i == trueClass ? weight : 0);
     }
     if (this.lastSeenClass == trueClass) {
       this.weightCorrectNoChangeClassifier.add(weight);
     } else {
       this.weightCorrectNoChangeClassifier.add(0);
     }
     this.classAccuracy[trueClass].add(predictedClass == trueClass ? weight : 0.0);
     this.lastSeenClass = trueClass;
   }
 }
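The row/column weights tracked above are exactly what the Kappa statistic needs: observed accuracy compared against the accuracy a chance classifier with the same prediction marginals would reach. A minimal, self-contained sketch of that final computation (plain totals stand in for the estimator objects used above, so the names are illustrative only):

  // Illustrative only: Cohen's Kappa from per-class predicted (row) and
  // true (column) weight totals plus the correctly classified weight.
  static double kappa(double[] rowWeights, double[] columnWeights,
                      double weightCorrect, double totalWeight) {
    double observedAccuracy = weightCorrect / totalWeight;
    double chanceAccuracy = 0.0;
    for (int i = 0; i < rowWeights.length; i++) {
      // probability that prediction and true label agree on class i by chance
      chanceAccuracy += (rowWeights[i] / totalWeight) * (columnWeights[i] / totalWeight);
    }
    return (observedAccuracy - chanceAccuracy) / (1.0 - chanceAccuracy);
  }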
Example #2
  /**
   * Method for building an Id3 tree.
   *
   * @param data the training data
   * @exception Exception if decision tree can't be built successfully
   */
  private void makeTree(Instances data) throws Exception {

    // Check if no instances have reached this node.
    if (data.numInstances() == 0) {
      m_Attribute = null;
      m_ClassValue = Utils.missingValue();
      m_Distribution = new double[data.numClasses()];
      return;
    }

    // Compute attribute with maximum information gain.
    double[] infoGains = new double[data.numAttributes()];
    Enumeration attEnum = data.enumerateAttributes();
    while (attEnum.hasMoreElements()) {
      Attribute att = (Attribute) attEnum.nextElement();
      infoGains[att.index()] = computeInfoGain(data, att);
    }
    m_Attribute = data.attribute(Utils.maxIndex(infoGains));

    // Make leaf if information gain is zero.
    // Otherwise create successors.
    if (Utils.eq(infoGains[m_Attribute.index()], 0)) {
      m_Attribute = null;
      m_Distribution = new double[data.numClasses()];
      Enumeration instEnum = data.enumerateInstances();
      while (instEnum.hasMoreElements()) {
        Instance inst = (Instance) instEnum.nextElement();
        m_Distribution[(int) inst.classValue()]++;
      }
      Utils.normalize(m_Distribution);
      m_ClassValue = Utils.maxIndex(m_Distribution);
      m_ClassAttribute = data.classAttribute();
    } else {
      Instances[] splitData = splitData(data, m_Attribute);
      m_Successors = new Id3[m_Attribute.numValues()];
      for (int j = 0; j < m_Attribute.numValues(); j++) {
        m_Successors[j] = new Id3();
        m_Successors[j].makeTree(splitData[j]);
      }
    }
  }
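computeInfoGain() and splitData() are referenced but not shown here; the sketch below follows the standard Weka Id3 formulation (nominal attributes only, entropy in bits via Utils.log2) and is meant as a guide rather than the exact code of this example:

  // Information gain obtained by splitting `data` on nominal attribute `att`.
  private double computeInfoGain(Instances data, Attribute att) throws Exception {
    double infoGain = computeEntropy(data);
    Instances[] splitData = splitData(data, att);
    for (int j = 0; j < att.numValues(); j++) {
      if (splitData[j].numInstances() > 0) {
        infoGain -= ((double) splitData[j].numInstances() / data.numInstances())
            * computeEntropy(splitData[j]);
      }
    }
    return infoGain;
  }

  // Entropy of the class distribution of `data`, in bits.
  private double computeEntropy(Instances data) throws Exception {
    double[] classCounts = new double[data.numClasses()];
    Enumeration instEnum = data.enumerateInstances();
    while (instEnum.hasMoreElements()) {
      Instance inst = (Instance) instEnum.nextElement();
      classCounts[(int) inst.classValue()]++;
    }
    double entropy = 0;
    for (int j = 0; j < data.numClasses(); j++) {
      if (classCounts[j] > 0) {
        entropy -= classCounts[j] * Utils.log2(classCounts[j]);
      }
    }
    entropy /= data.numInstances();
    return entropy + Utils.log2(data.numInstances());
  }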
Example #3
    /**
     * Evaluate the gradient of the objective function
     *
     * @param x the current values of variables
     * @return the gradient vector
     */
    protected double[] evaluateGradient(double[] x) {
      double[] grad = new double[x.length];
      int dim = m_NumPredictors + 1; // Number of variables per class

      for (int i = 0; i < cls.length; i++) { // ith instance
        double[] num = new double[m_NumClasses - 1]; // numerator of
        // [-log(1+sum(exp))]'
        int index;
        for (int offset = 0; offset < m_NumClasses - 1; offset++) { // Which
          // part of x
          double exp = 0.0;
          index = offset * dim;
          for (int j = 0; j < dim; j++) {
            exp += m_Data[i][j] * x[index + j];
          }
          num[offset] = exp;
        }

        double max = num[Utils.maxIndex(num)];
        double denom = Math.exp(-max); // Denominator of [-log(1+sum(exp))]'
        for (int offset = 0; offset < m_NumClasses - 1; offset++) {
          num[offset] = Math.exp(num[offset] - max);
          denom += num[offset];
        }
        Utils.normalize(num, denom);

        // Update denominator of the gradient of -log(Posterior)
        double firstTerm;
        for (int offset = 0; offset < m_NumClasses - 1; offset++) { // Which
          // part of x
          index = offset * dim;
          firstTerm = weights[i] * num[offset];
          for (int q = 0; q < dim; q++) {
            grad[index + q] += firstTerm * m_Data[i][q];
          }
        }

        if (cls[i] != m_NumClasses - 1) { // Not the last class
          for (int p = 0; p < dim; p++) {
            grad[cls[i] * dim + p] -= weights[i] * m_Data[i][p];
          }
        }
      }

      // Ridge: note that intercepts NOT included
      for (int offset = 0; offset < m_NumClasses - 1; offset++) {
        for (int r = 1; r < dim; r++) {
          grad[offset * dim + r] += 2 * m_Ridge * x[offset * dim + r];
        }
      }

      return grad;
    }
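What the loops above accumulate is the gradient of the weighted, ridge-penalised negative log-likelihood of a multinomial logistic model with class K-1 as the reference class (d = m_NumPredictors + 1 variables per class, intercept in column 0). Writing P_c(x_i) for the softmax probability computed in num/denom:

$$\frac{\partial(-\log L)}{\partial x_{c,j}} \;=\; \sum_i w_i \,\bigl(P_c(x_i) - \mathbb{1}[y_i = c]\bigr)\, x_{ij} \;+\; 2\,\lambda\, x_{c,j}, \qquad c < K-1,$$

where the ridge term (lambda = m_Ridge) is added for every column j >= 1, i.e. for all coefficients except the intercepts, exactly as in the final loop.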
Example #4
  /**
   * Assigns a cluster to the given instance. Either this or distributionForInstance() needs to be
   * implemented by subclasses.
   *
   * @param instance the instance to be assigned to a cluster
   * @return the number of the assigned cluster as an integer
   * @exception Exception if instance could not be clustered successfully
   */
  @Override
  public int clusterInstance(Instance instance) throws Exception {

    double[] dist = distributionForInstance(instance);

    if (dist == null) {
      throw new Exception("Null distribution predicted");
    }

    if (Utils.sum(dist) <= 0) {
      throw new Exception("Unable to cluster instance");
    }
    return Utils.maxIndex(dist);
  }
  /**
   * Generates a clusterer by means of the spectral clustering algorithm.
   *
   * @param data set of instances serving as training data
   * @exception Exception if the clusterer has not been generated successfully
   */
  public void buildClusterer(Instances data) throws java.lang.Exception {
    m_Sequences = new Instances(data);
    int n = data.numInstances();
    int k = data.numAttributes();
    DoubleMatrix2D w;
    if (useSparseMatrix) w = DoubleFactory2D.sparse.make(n, n);
    else w = DoubleFactory2D.dense.make(n, n);
    double[][] v1 = new double[n][];
    for (int i = 0; i < n; i++) v1[i] = data.instance(i).toDoubleArray();
    v = DoubleFactory2D.dense.make(v1);
    double sigma_sq = sigma * sigma;
    // Sets up similarity matrix
    for (int i = 0; i < n; i++)
      for (int j = i; j < n; j++) {
        /*double dist = distnorm2(v.viewRow(i), v.viewRow(j));
        if((r == -1) || (dist < r)) {
          double sim = Math.exp(- (dist * dist) / (2 * sigma_sq));
          w.set(i, j, sim);
          w.set(j, i, sim);
        }*/
        /* String [] key = {data.instance(i).stringValue(0), data.instance(j).stringValue(0)};
        System.out.println(key[0]);
        System.out.println(key[1]);
        System.out.println(simScoreMap.containsKey(key));
        Double simValue = simScoreMap.get(key);*/

        double sim = sim_matrix[i][j];
        w.set(i, j, sim);
        w.set(j, i, sim);
      }

    // Partitions points
    int[][] p = partition(w, alpha_star);

    // Deploys results
    numOfClusters = p.length;
    cluster = new int[n];
    for (int i = 0; i < p.length; i++) for (int j = 0; j < p[i].length; j++) cluster[p[i][j]] = i;

    // System.out.println("Final partition:");
    // UtilsJS.printMatrix(p);
    // System.out.println("Cluster:\n");
    // UtilsJS.printArray(cluster);
    this.numOfClusters = cluster[Utils.maxIndex(cluster)] + 1;
    //  System.out.println("Num clusters:\t"+this.numOfClusters);
  }
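Here the affinity matrix w is filled from a precomputed sim_matrix, but the commented-out block shows the usual construction: a Gaussian (RBF) kernel on pairwise distances, optionally truncated at a radius r. A sketch of that kernel, assuming (as in the commented code) that distnorm2 returns the Euclidean distance between two rows and that r == -1 means "no cut-off":

  // Gaussian (RBF) affinity between two points, following the commented-out variant above.
  static double gaussianSimilarity(double dist, double sigma, double r) {
    if (r != -1 && dist >= r) {
      return 0.0; // outside the cut-off radius: no edge in the similarity graph
    }
    return Math.exp(-(dist * dist) / (2 * sigma * sigma));
  }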
Example #6
  // Use the TriTrainer classifier to classify an instance.
  public double classifyInstance(Instance instance) throws Exception {
    double result;
    double[] dist;
    int index;
    dist = distributionForInstance(instance); // class probability distribution

    if (instance.classAttribute().isNominal()) {
      index = Utils.maxIndex(dist); // index of the most probable class
      if (dist[index] == 0) result = Instance.missingValue();
      else result = index; // return the class index (not its probability), per the classifyInstance contract
    } else if (instance.classAttribute().isNumeric()) {
      result = dist[0];
    } else {
      result = Instance.missingValue();
    }
    return result;
  }
Example #7
  /**
   * Calculates the class membership probabilities for the given test instance.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if there is a problem generating the prediction
   */
  public double[] distributionForInstance(Instance instance) throws Exception {
    double[] probOfClassGivenDoc = new double[m_numClasses];

    // calculate the array of log(Pr[D|C])
    double[] logDocGivenClass = new double[m_numClasses];
    for (int h = 0; h < m_numClasses; h++) logDocGivenClass[h] = probOfDocGivenClass(instance, h);

    double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)];
    double probOfDoc = 0.0;

    for (int i = 0; i < m_numClasses; i++) {
      probOfClassGivenDoc[i] = Math.exp(logDocGivenClass[i] - max) * m_probOfClass[i];
      probOfDoc += probOfClassGivenDoc[i];
    }

    Utils.normalize(probOfClassGivenDoc, probOfDoc);

    return probOfClassGivenDoc;
  }
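The max subtraction is the usual log-sum-exp guard: Bayes' rule is applied in log space and re-exponentiated only after shifting every log-likelihood by the largest one, so the distribution returned is

$$P(c \mid d) \;=\; \frac{e^{\log P(d \mid c) - m}\; P(c)}{\sum_{c'} e^{\log P(d \mid c') - m}\; P(c')}, \qquad m = \max_{c'} \log P(d \mid c'),$$

which is identical to $P(d \mid c)P(c) / \sum_{c'} P(d \mid c')P(c')$ but does not underflow when the log-likelihoods are strongly negative.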
Example #8
    /**
     * Evaluate objective function
     *
     * @param x the current values of variables
     * @return the value of the objective function
     */
    protected double objectiveFunction(double[] x) {
      double nll = 0; // -LogLikelihood
      int dim = m_NumPredictors + 1; // Number of variables per class

      for (int i = 0; i < cls.length; i++) { // ith instance

        double[] exp = new double[m_NumClasses - 1];
        int index;
        for (int offset = 0; offset < m_NumClasses - 1; offset++) {
          index = offset * dim;
          for (int j = 0; j < dim; j++) {
            exp[offset] += m_Data[i][j] * x[index + j];
          }
        }
        double max = exp[Utils.maxIndex(exp)];
        double denom = Math.exp(-max);
        double num;
        if (cls[i] == m_NumClasses - 1) { // Class of this instance
          num = -max;
        } else {
          num = exp[cls[i]] - max;
        }
        for (int offset = 0; offset < m_NumClasses - 1; offset++) {
          denom += Math.exp(exp[offset] - max);
        }

        nll -= weights[i] * (num - Math.log(denom)); // Weighted NLL
      }

      // Ridge: note that intercepts NOT included
      for (int offset = 0; offset < m_NumClasses - 1; offset++) {
        for (int r = 1; r < dim; r++) {
          nll += m_Ridge * x[offset * dim + r] * x[offset * dim + r];
        }
      }

      return nll;
    }
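The quantity accumulated in nll is the weighted negative log-likelihood of the same multinomial logistic model whose gradient appears in the earlier example, with class K-1 as the reference class and a ridge penalty on all non-intercept coefficients:

$$-\log L \;=\; -\sum_i w_i\Bigl(f_{y_i}(x_i) - \log\bigl(1 + \textstyle\sum_{c=0}^{K-2} e^{f_c(x_i)}\bigr)\Bigr) \;+\; \lambda \sum_{c=0}^{K-2} \sum_{j=1}^{d-1} x_{c,j}^2,$$

where $f_c(x_i) = \sum_j x_{c,j}\,\text{m\_Data}[i][j]$ and $f_{K-1} \equiv 0$ for the reference class; the subtraction of max merely keeps the log(1 + sum exp) term numerically stable.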
  /**
   * Calculates the class membership probabilities for the given test instance.
   *
   * @param instance the instance to be classified
   * @return predicted class probability distribution
   * @throws Exception if there is a problem generating the prediction
   */
  @Override
  public double[] distributionForInstance(Instance instance) throws Exception {

    tokenizeInstance(instance, false);

    double[] probOfClassGivenDoc = new double[m_data.numClasses()];

    double[] logDocGivenClass = new double[m_data.numClasses()];
    for (int i = 0; i < m_data.numClasses(); i++) {
      logDocGivenClass[i] += Math.log(m_probOfClass[i]);

      LinkedHashMap<String, Count> dictForClass = m_probOfWordGivenClass.get(i);

      double allWords = 0; // declared double: normalized frequencies added below can be fractional
      // for document normalization (if in use)
      double iNorm = 0;
      double fv = 0;

      if (m_normalize) {
        for (Map.Entry<String, Count> feature : m_inputVector.entrySet()) {
          String word = feature.getKey();
          Count c = feature.getValue();

          // check the word against all the dictionaries (all classes)
          boolean ok = false;
          for (int clss = 0; clss < m_data.numClasses(); clss++) {
            if (m_probOfWordGivenClass.get(clss).get(word) != null) {
              ok = true;
              break;
            }
          }

          // only normalize with respect to those words that we've seen during
          // training
          // (i.e. dictionary over all classes)
          if (ok) {
            // word counts or bag-of-words?
            fv = (m_wordFrequencies) ? c.m_count : 1.0;
            iNorm += Math.pow(Math.abs(fv), m_lnorm);
          }
        }
        iNorm = Math.pow(iNorm, 1.0 / m_lnorm);
      }

      // System.out.println("---- " + m_inputVector.size());
      for (Map.Entry<String, Count> feature : m_inputVector.entrySet()) {
        String word = feature.getKey();
        Count dictCount = dictForClass.get(word);
        // System.out.print(word + " ");
        /*
         * if (dictCount != null) { System.out.println(dictCount.m_count); }
         * else { System.out.println("*1"); }
         */
        // check the word against all the dictionaries (all classes)
        boolean ok = false;
        for (int clss = 0; clss < m_data.numClasses(); clss++) {
          if (m_probOfWordGivenClass.get(clss).get(word) != null) {
            ok = true;
            break;
          }
        }

        // ignore words we haven't seen in the training data
        if (ok) {
          double freq = (m_wordFrequencies) ? feature.getValue().m_count : 1.0;
          // double freq = (feature.getValue().m_count / iNorm * m_norm);
          if (m_normalize) {
            freq /= iNorm * m_norm;
          }
          allWords += freq;

          if (dictCount != null) {
            logDocGivenClass[i] += freq * Math.log(dictCount.m_count);
          } else {
            // Laplace correction for unseen (zero-frequency) words
            logDocGivenClass[i] += freq * Math.log(m_leplace);
          }
        }
      }

      if (m_wordsPerClass[i] > 0) {
        logDocGivenClass[i] -= allWords * Math.log(m_wordsPerClass[i]);
      }
    }

    double max = logDocGivenClass[Utils.maxIndex(logDocGivenClass)];

    for (int i = 0; i < m_data.numClasses(); i++) {
      probOfClassGivenDoc[i] = Math.exp(logDocGivenClass[i] - max);
    }

    Utils.normalize(probOfClassGivenDoc);

    return probOfClassGivenDoc;
  }
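When m_normalize is set, each word frequency is scaled by the document's L_p length (p = m_lnorm) so that long and short documents contribute comparably; with f_w the raw (or binarised, if word frequencies are off) count of word w:

$$\lVert d\rVert_p = \Bigl(\sum_{w \in d} \lvert f_w\rvert^{\,p}\Bigr)^{1/p}, \qquad f_w \leftarrow \frac{f_w}{\lVert d\rVert_p \cdot \text{m\_norm}},$$

which matches the iNorm computation and the freq /= iNorm * m_norm step above; only words seen during training (in any class dictionary) enter the norm.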
Example #10
 /**
  * Return the argmax on #distribution(Instance, double[]).
  *
  * @return argmax_{k in 0,1,...} p( y_j = k | x , y_pred )
  */
 public double classify(Instance x, double ypred[]) throws Exception {
   Instance x_ = transform(x, ypred);
   return Utils.maxIndex(h.distributionForInstance(x_));
 }
  /**
   * Accepts and processes a classifier encapsulated in an incremental classifier event
   *
   * @param ce an <code>IncrementalClassifierEvent</code> value
   */
  @Override
  public void acceptClassifier(final IncrementalClassifierEvent ce) {
    try {
      if (ce.getStatus() == IncrementalClassifierEvent.NEW_BATCH) {
        m_throughput = new StreamThroughput(statusMessagePrefix());
        m_throughput.setSamplePeriod(m_statusFrequency);

        // m_eval = new Evaluation(ce.getCurrentInstance().dataset());
        m_eval = new Evaluation(ce.getStructure());
        m_eval.useNoPriors();

        m_dataLegend = new Vector();
        m_reset = true;
        m_dataPoint = new double[0];
        Instances inst = ce.getStructure();
        System.err.println("NEW BATCH");
        m_instanceCount = 0;

        if (m_windowSize > 0) {
          m_window = new LinkedList<Instance>();
          m_windowEval = new Evaluation(ce.getStructure());
          m_windowEval.useNoPriors();
          m_windowedPreds = new LinkedList<double[]>();

          if (m_logger != null) {
            m_logger.logMessage(
                statusMessagePrefix()
                    + "[IncrementalClassifierEvaluator] Chart output using windowed "
                    + "evaluation over "
                    + m_windowSize
                    + " instances");
          }
        }

        /*
         * if (m_logger != null) { m_logger.statusMessage(statusMessagePrefix()
         * + "IncrementalClassifierEvaluator: started processing...");
         * m_logger.logMessage(statusMessagePrefix() +
         * " [IncrementalClassifierEvaluator]" + statusMessagePrefix() +
         * " started processing..."); }
         */
      } else {
        Instance inst = ce.getCurrentInstance();
        if (inst != null) {
          m_throughput.updateStart();
          m_instanceCount++;
          // if (inst.attribute(inst.classIndex()).isNominal()) {
          double[] dist = ce.getClassifier().distributionForInstance(inst);
          double pred = 0;
          if (!inst.isMissing(inst.classIndex())) {
            if (m_outputInfoRetrievalStats) {
              // store predictions so AUC etc can be output.
              m_eval.evaluateModelOnceAndRecordPrediction(dist, inst);
            } else {
              m_eval.evaluateModelOnce(dist, inst);
            }

            if (m_windowSize > 0) {

              m_windowEval.evaluateModelOnce(dist, inst);
              m_window.addFirst(inst);
              m_windowedPreds.addFirst(dist);

              if (m_instanceCount > m_windowSize) {
                // "forget" the oldest prediction
                Instance oldest = m_window.removeLast();

                double[] oldDist = m_windowedPreds.removeLast();
                oldest.setWeight(-oldest.weight());
                m_windowEval.evaluateModelOnce(oldDist, oldest);
                oldest.setWeight(-oldest.weight());
              }
            }
          } else {
            pred = ce.getClassifier().classifyInstance(inst);
          }
          if (inst.classIndex() >= 0) {
            // need to check that the class is not missing
            if (inst.attribute(inst.classIndex()).isNominal()) {
              if (!inst.isMissing(inst.classIndex())) {
                if (m_dataPoint.length < 2) {
                  m_dataPoint = new double[3];
                  m_dataLegend.addElement("Accuracy");
                  m_dataLegend.addElement("RMSE (prob)");
                  m_dataLegend.addElement("Kappa");
                }
                // int classV = (int) inst.value(inst.classIndex());

                if (m_windowSize > 0) {
                  m_dataPoint[1] = m_windowEval.rootMeanSquaredError();
                  m_dataPoint[2] = m_windowEval.kappa();
                } else {
                  m_dataPoint[1] = m_eval.rootMeanSquaredError();
                  m_dataPoint[2] = m_eval.kappa();
                }
                // int maxO = Utils.maxIndex(dist);
                // if (maxO == classV) {
                // dist[classV] = -1;
                // maxO = Utils.maxIndex(dist);
                // }
                // m_dataPoint[1] -= dist[maxO];
              } else {
                if (m_dataPoint.length < 1) {
                  m_dataPoint = new double[1];
                  m_dataLegend.addElement("Confidence");
                }
              }
              double primaryMeasure = 0;
              if (!inst.isMissing(inst.classIndex())) {
                if (m_windowSize > 0) {
                  primaryMeasure = 1.0 - m_windowEval.errorRate();
                } else {
                  primaryMeasure = 1.0 - m_eval.errorRate();
                }
              } else {
                // record confidence as the primary measure
                // (another possibility would be entropy of
                // the distribution, or perhaps average
                // confidence)
                primaryMeasure = dist[Utils.maxIndex(dist)];
              }
              // double [] dataPoint = new double[1];
              m_dataPoint[0] = primaryMeasure;
              // double min = 0; double max = 100;
              /*
               * ChartEvent e = new
               * ChartEvent(IncrementalClassifierEvaluator.this, m_dataLegend,
               * min, max, dataPoint);
               */

              m_ce.setLegendText(m_dataLegend);
              m_ce.setMin(0);
              m_ce.setMax(1);
              m_ce.setDataPoint(m_dataPoint);
              m_ce.setReset(m_reset);
              m_reset = false;
            } else {
              // numeric class
              if (m_dataPoint.length < 1) {
                m_dataPoint = new double[1];
                if (inst.isMissing(inst.classIndex())) {
                  m_dataLegend.addElement("Prediction");
                } else {
                  m_dataLegend.addElement("RMSE");
                }
              }
              if (!inst.isMissing(inst.classIndex())) {
                double update;
                if (!inst.isMissing(inst.classIndex())) {
                  if (m_windowSize > 0) {
                    update = m_windowEval.rootMeanSquaredError();
                  } else {
                    update = m_eval.rootMeanSquaredError();
                  }
                } else {
                  update = pred;
                }
                m_dataPoint[0] = update;
                if (update > m_max) {
                  m_max = update;
                }
                if (update < m_min) {
                  m_min = update;
                }
              }

              m_ce.setLegendText(m_dataLegend);
              m_ce.setMin((inst.isMissing(inst.classIndex()) ? m_min : 0));
              m_ce.setMax(m_max);
              m_ce.setDataPoint(m_dataPoint);
              m_ce.setReset(m_reset);
              m_reset = false;
            }
            notifyChartListeners(m_ce);
          }
          m_throughput.updateEnd(m_logger);
        }

        if (ce.getStatus() == IncrementalClassifierEvent.BATCH_FINISHED || inst == null) {
          if (m_logger != null) {
            m_logger.logMessage(
                "[IncrementalClassifierEvaluator]"
                    + statusMessagePrefix()
                    + " Finished processing.");
          }
          m_throughput.finished(m_logger);

          // save memory if using windowed evaluation for charting
          m_windowEval = null;
          m_window = null;
          m_windowedPreds = null;

          if (m_textListeners.size() > 0) {
            String textTitle = ce.getClassifier().getClass().getName();
            textTitle = textTitle.substring(textTitle.lastIndexOf('.') + 1, textTitle.length());
            String results =
                "=== Performance information ===\n\n"
                    + "Scheme:   "
                    + textTitle
                    + "\n"
                    + "Relation: "
                    + m_eval.getHeader().relationName()
                    + "\n\n"
                    + m_eval.toSummaryString();
            if (m_eval.getHeader().classIndex() >= 0
                && m_eval.getHeader().classAttribute().isNominal()
                && (m_outputInfoRetrievalStats)) {
              results += "\n" + m_eval.toClassDetailsString();
            }

            if (m_eval.getHeader().classIndex() >= 0
                && m_eval.getHeader().classAttribute().isNominal()) {
              results += "\n" + m_eval.toMatrixString();
            }
            textTitle = "Results: " + textTitle;
            TextEvent te = new TextEvent(this, results, textTitle);
            notifyTextListeners(te);
          }
        }
      }
    } catch (Exception ex) {
      if (m_logger != null) {
        m_logger.logMessage(
            "[IncrementalClassifierEvaluator]"
                + statusMessagePrefix()
                + " Error processing prediction "
                + ex.getMessage());
        m_logger.statusMessage(
            statusMessagePrefix() + "ERROR: problem processing prediction (see log for details)");
      }
      ex.printStackTrace();
      stop();
    }
  }
  /**
   * Recursively generates a tree. A derivative of the buildTree function from the
   * "weka.classifiers.trees.RandomTree" class, with the following changes made:
   *
   * <ul>
   *   <li>m_ClassProbs are now remembered only in leaves, not in every node of the tree
   *   <li>m_Distribution has been removed
   *   <li>members of dists, splits, props and vals arrays which are not used are dereferenced prior
   *       to recursion to reduce memory requirements
   *   <li>a check for "branch with no training instances" is now (FastRF 0.98) made before
   *       recursion; with the current implementation of splitData(), empty branches can appear only
   *       with nominal attributes with more than two categories
   *   <li>each new 'tree' (i.e. node or leaf) is passed a reference to its 'mother forest',
   *       necessary to look up parameters such as maxDepth and K
   *   <li>pre-split entropy is not recalculated unnecessarily
   *   <li>uses DataCache instead of weka.core.Instances, the reference to the DataCache is stored
   *       as a field in FastRandomTree class and not passed recursively down new buildTree() calls
   *   <li>similarly, a reference to the random number generator is stored in a field of the
   *       DataCache
   *   <li>m_ClassProbs are now normalized by dividing with number of instances in leaf, instead of
   *       forcing the sum of class probabilities to 1.0; this has a large effect when
   *       class/instance weights are set by user
   *   <li>a little imprecision is allowed in checking whether there was a decrease in entropy after
   *       splitting
   *   <li>0.99: the temporary arrays splits, props, vals are no longer as wide as the full number of
   *       attributes in the dataset (of which only "k" columns of randomly chosen attributes get
   *       filled). Now, it's just a single array which gets replaced as the k features are
   *       evaluated sequentially, but it gets replaced only if a next feature is better than a
   *       previous one.
   *   <li>0.99: the SortedIndices are now not cut up into smaller arrays on every split, but rather
   *       re-sorted within the same array in the splitDataNew(), and passed down to buildTree() as
   *       the original large matrix, but with start and end points explicitly specified
   * </ul>
   *
   * @param sortedIndices the indices of the instances of the whole bootstrap replicate
   * @param startAt First index of the instance to consider in this split; inclusive.
   * @param endAt Last index of the instance to consider; inclusive.
   * @param classProbs the class distribution
   * @param debug whether debugging is on
   * @param attIndicesWindow the attribute window to choose attributes from
   * @param depth the current depth
   */
  protected void buildTree(
      int[][] sortedIndices,
      int startAt,
      int endAt,
      double[] classProbs,
      boolean debug,
      int[] attIndicesWindow,
      int depth) {

    m_Debug = debug;
    int sortedIndicesLength = endAt - startAt + 1;

    // Check if node doesn't contain enough instances or is pure
    // or maximum depth reached, make leaf.
    if ((sortedIndicesLength < Math.max(2, getMinNum())) // small
        || Utils.eq(classProbs[Utils.maxIndex(classProbs)], Utils.sum(classProbs)) // pure
        || ((getMaxDepth() > 0) && (depth >= getMaxDepth())) // deep
    ) {
      m_Attribute = -1; // indicates leaf (no useful attribute to split on)

      // normalize by dividing with the number of instances (as of ver. 0.97)
      // unless leaf is empty - this can happen with splits on nominal
      // attributes with more than two categories
      if (sortedIndicesLength != 0)
        for (int c = 0; c < classProbs.length; c++) {
          classProbs[c] /= sortedIndicesLength;
        }
      m_ClassProbs = classProbs;
      this.data = null;
      return;
    } // (leaf making)

    // new 0.99: all of the following are for the best attribute only! They are updated while
    // iterating sequentially through the attributes
    double val = Double.NaN; // value of splitting criterion
    // class distributions (contingency table), indexed first by branch, then by class
    double[][] dist = new double[2][data.numClasses];
    double[] prop = new double[2]; // the branch sizes (as fraction)
    double split = Double.NaN; // split point

    // Investigate K random attributes
    int attIndex = 0;
    int windowSize = attIndicesWindow.length;
    int k = getKValue();
    boolean sensibleSplitFound = false;
    double prior = Double.NaN;
    double bestNegPosterior = -Double.MAX_VALUE;
    int bestAttIdx = -1;

    while ((windowSize > 0) && (k-- > 0 || !sensibleSplitFound)) {

      int chosenIndex = data.reusableRandomGenerator.nextInt(windowSize);
      attIndex = attIndicesWindow[chosenIndex];

      // shift chosen attIndex out of window
      attIndicesWindow[chosenIndex] = attIndicesWindow[windowSize - 1];
      attIndicesWindow[windowSize - 1] = attIndex;
      windowSize--;

      // new: 0.99
      double candidateSplit =
          distributionSequentialAtt(
              prop, dist, bestNegPosterior, attIndex, sortedIndices[attIndex], startAt, endAt);

      if (Double.isNaN(candidateSplit)) {
        continue; // we did not improve over a previous attribute! "dist" is unchanged from before
      }
      // by this point we know we have an improvement, so we keep the new split point
      split = candidateSplit;
      bestAttIdx = attIndex;

      if (Double.isNaN(
          prior)) { // needs to be computed only once per branch - is same for all attributes (even
                    // regardless of missing values)
        prior = SplitCriteria.entropyOverColumns(dist);
      }

      double negPosterior =
          -SplitCriteria.entropyConditionedOnRows(dist); // this is an updated dist
      if (negPosterior > bestNegPosterior) {
        bestNegPosterior = negPosterior;
      } else {
        throw new IllegalArgumentException("Very strange!");
      }

      val = prior - (-negPosterior); // we want the greatest reduction in entropy
      if (val > 1e-2) { // we allow some leeway here to compensate
        sensibleSplitFound = true; // for imprecision in entropy computation
      }
    } // feature by feature in window

    if (sensibleSplitFound) {

      m_Attribute = bestAttIdx; // find best attribute
      m_SplitPoint = split;
      m_Prop = prop;
      prop = null; // can be GC'ed

      // int[][][] subsetIndices =
      //        new int[dist.length][data.numAttributes][];
      // splitData( subsetIndices, m_Attribute,
      //        m_SplitPoint, sortedIndices );
      // int numInstancesBeforeSplit = sortedIndices[0].length;

      int belowTheSplitStartsAt =
          splitDataNew(m_Attribute, m_SplitPoint, sortedIndices, startAt, endAt);

      m_Successors = new FastRandomTree[dist.length]; // dist.length now always == 2
      for (int i = 0; i < dist.length; i++) {
        m_Successors[i] = new FastRandomTree();
        m_Successors[i].m_MotherForest = this.m_MotherForest;
        m_Successors[i].data = this.data;
        // new in 0.99 - used in distributionSequentialAtt()
        m_Successors[i].tempDists = this.tempDists;
        m_Successors[i].tempDistsOther = this.tempDistsOther;
        m_Successors[i].tempProps = this.tempProps;

        // check if we're about to make an empty branch - this can happen with
        // nominal attributes with more than two categories (as of ver. 0.98)
        if (belowTheSplitStartsAt - startAt == 0) {
          // in this case, modify the chosenAttDists[i] so that it contains
          // the current, before-split class probabilities, properly normalized
          // by the number of instances (as we won't be able to normalize
          // after the split)
          for (int j = 0; j < dist[i].length; j++) dist[i][j] = classProbs[j] / sortedIndicesLength;
        }

        if (i == 0) { // before split
          m_Successors[i].buildTree(
              sortedIndices,
              startAt,
              belowTheSplitStartsAt - 1,
              dist[i],
              m_Debug,
              attIndicesWindow,
              depth + 1);
        } else { // after split
          m_Successors[i].buildTree(
              sortedIndices,
              belowTheSplitStartsAt,
              endAt,
              dist[i],
              m_Debug,
              attIndicesWindow,
              depth + 1);
        }

        dist[i] = null;
      }
      sortedIndices = null;

    } else { // ------ make leaf --------

      m_Attribute = -1;

      // normalize by dividing with the number of instances (as of ver. 0.97)
      // unless leaf is empty - this can happen with splits on nominal attributes
      if (sortedIndicesLength != 0)
        for (int c = 0; c < classProbs.length; c++) {
          classProbs[c] /= sortedIndicesLength;
        }

      m_ClassProbs = classProbs;
    }

    this.data = null; // dereference all pointers so data can be GC'd after tree is built
  }
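The split score tested against 1e-2 is plain information gain expressed on the two-branch contingency table dist: prior = entropyOverColumns(dist) is the pre-split class entropy, and -negPosterior = entropyConditionedOnRows(dist) is the size-weighted entropy of the branches, so

$$\text{val} \;=\; H(\text{parent}) \;-\; \sum_{b=0}^{1} \frac{n_b}{n}\, H(\text{branch } b),$$

and the split is accepted only if this gain exceeds the 1e-2 leeway that compensates for floating-point imprecision in the entropy computation.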
Example #13
 public boolean correctlyClassifies(Instance inst) {
   return Utils.maxIndex(getVotesForInstance(inst)) == (int) inst.classValue();
 }
Example #14
  /**
   * Classifies an instance for internal leave one out cross validation of feature sets
   *
   * @param instance instance to be "left out" and classified
   * @param instA feature values of the selected features for the instance
   * @return the classification of the instance
   * @throws Exception if something goes wrong
   */
  double evaluateInstanceLeaveOneOut(Instance instance, double[] instA) throws Exception {

    DecisionTableHashKey thekey;
    double[] tempDist;
    double[] normDist;

    thekey = new DecisionTableHashKey(instA);
    if (m_classIsNominal) {

      // if this one is not in the table
      if ((tempDist = (double[]) m_entries.get(thekey)) == null) {
        throw new Error("This should never happen!");
      } else {
        normDist = new double[tempDist.length];
        System.arraycopy(tempDist, 0, normDist, 0, tempDist.length);
        normDist[(int) instance.classValue()] -= instance.weight();

        // update the table
        // first check to see if the class counts are all zero now
        boolean ok = false;
        for (int i = 0; i < normDist.length; i++) {
          if (Utils.gr(normDist[i], 1.0)) {
            ok = true;
            break;
          }
        }

        //	downdate the class prior counts
        m_classPriorCounts[(int) instance.classValue()] -= instance.weight();
        double[] classPriors = m_classPriorCounts.clone();
        Utils.normalize(classPriors);
        if (!ok) { // majority class
          normDist = classPriors;
        }

        m_classPriorCounts[(int) instance.classValue()] += instance.weight();

        // if (ok) {
        Utils.normalize(normDist);
        if (m_evaluationMeasure == EVAL_AUC) {
          m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance);
        } else {
          m_evaluation.evaluateModelOnce(normDist, instance);
        }
        return Utils.maxIndex(normDist);
        /*} else {
          normDist = new double [normDist.length];
          normDist[(int)m_majority] = 1.0;
          if (m_evaluationMeasure == EVAL_AUC) {
            m_evaluation.evaluateModelOnceAndRecordPrediction(normDist, instance);
          } else {
            m_evaluation.evaluateModelOnce(normDist, instance);
          }
          return m_majority;
        } */
      }
      //      return Utils.maxIndex(tempDist);
    } else {

      // see if this one is already in the table
      if ((tempDist = (double[]) m_entries.get(thekey)) != null) {
        normDist = new double[tempDist.length];
        System.arraycopy(tempDist, 0, normDist, 0, tempDist.length);
        normDist[0] -= (instance.classValue() * instance.weight());
        normDist[1] -= instance.weight();
        if (Utils.eq(normDist[1], 0.0)) {
          double[] temp = new double[1];
          temp[0] = m_majority;
          m_evaluation.evaluateModelOnce(temp, instance);
          return m_majority;
        } else {
          double[] temp = new double[1];
          temp[0] = normDist[0] / normDist[1];
          m_evaluation.evaluateModelOnce(temp, instance);
          return temp[0];
        }
      } else {
        throw new Error("This should never happen!");
      }
    }

    // shouldn't get here
    // return 0.0;
  }
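The downdating above is what makes this a genuine leave-one-out estimate: the held-out instance's own weight is removed from its table entry before the prediction is scored. For example, an entry with class counts [3, 2] and a held-out instance of class 0 with weight 1 is evaluated from the downdated counts [2, 2], i.e. the normalised distribution (0.5, 0.5); if no class count remains above 1 after removal, the (temporarily downdated) class priors are used instead.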
Example #15
  /**
   * Returns a description of the classifier.
   *
   * @return a description of the classifier as a string.
   */
  public String toString() {

    if (m_entries == null) {
      return "Decision Table: No model built yet.";
    } else {
      StringBuffer text = new StringBuffer();

      text.append(
          "Decision Table:"
              + "\n\nNumber of training instances: "
              + m_numInstances
              + "\nNumber of Rules : "
              + m_entries.size()
              + "\n");

      if (m_useIBk) {
        text.append("Non matches covered by IB1.\n");
      } else {
        text.append("Non matches covered by Majority class.\n");
      }

      text.append(m_search.toString());
      /*text.append("Best first search for feature set,\nterminated after "+
      m_maxStale+" non improving subsets.\n"); */

      text.append("Evaluation (for feature selection): CV ");
      if (m_CVFolds > 1) {
        text.append("(" + m_CVFolds + " fold) ");
      } else {
        text.append("(leave one out) ");
      }
      text.append("\nFeature set: " + printFeatures());

      if (m_displayRules) {

        // find out the max column width
        int maxColWidth = 0;
        for (int i = 0; i < m_dtInstances.numAttributes(); i++) {
          if (m_dtInstances.attribute(i).name().length() > maxColWidth) {
            maxColWidth = m_dtInstances.attribute(i).name().length();
          }

          if (m_classIsNominal || (i != m_dtInstances.classIndex())) {
            Enumeration e = m_dtInstances.attribute(i).enumerateValues();
            while (e.hasMoreElements()) {
              String ss = (String) e.nextElement();
              if (ss.length() > maxColWidth) {
                maxColWidth = ss.length();
              }
            }
          }
        }

        text.append("\n\nRules:\n");
        StringBuffer tm = new StringBuffer();
        for (int i = 0; i < m_dtInstances.numAttributes(); i++) {
          if (m_dtInstances.classIndex() != i) {
            int d = maxColWidth - m_dtInstances.attribute(i).name().length();
            tm.append(m_dtInstances.attribute(i).name());
            for (int j = 0; j < d + 1; j++) {
              tm.append(" ");
            }
          }
        }
        tm.append(m_dtInstances.attribute(m_dtInstances.classIndex()).name() + "  ");

        for (int i = 0; i < tm.length() + 10; i++) {
          text.append("=");
        }
        text.append("\n");
        text.append(tm);
        text.append("\n");
        for (int i = 0; i < tm.length() + 10; i++) {
          text.append("=");
        }
        text.append("\n");

        Enumeration e = m_entries.keys();
        while (e.hasMoreElements()) {
          DecisionTableHashKey tt = (DecisionTableHashKey) e.nextElement();
          text.append(tt.toString(m_dtInstances, maxColWidth));
          double[] ClassDist = (double[]) m_entries.get(tt);

          if (m_classIsNominal) {
            int m = Utils.maxIndex(ClassDist);
            try {
              text.append(m_dtInstances.classAttribute().value(m) + "\n");
            } catch (Exception ee) {
              System.out.println(ee.getMessage());
            }
          } else {
            text.append((ClassDist[0] / ClassDist[1]) + "\n");
          }
        }

        for (int i = 0; i < tm.length() + 10; i++) {
          text.append("=");
        }
        text.append("\n");
        text.append("\n");
      }
      return text.toString();
    }
  }
Example #16
  /**
   * Select the best value for k by hold-one-out cross-validation. If the class attribute is
   * nominal, classification error is minimised. If the class attribute is numeric, mean absolute
   * error is minimised
   */
  protected void crossValidate() {

    try {
      if (m_NNSearch instanceof weka.core.neighboursearch.CoverTree)
        throw new Exception(
            "CoverTree doesn't support hold-one-out "
                + "cross-validation. Use some other NN "
                + "method.");

      double[] performanceStats = new double[m_kNNUpper];
      double[] performanceStatsSq = new double[m_kNNUpper];

      for (int i = 0; i < m_kNNUpper; i++) {
        performanceStats[i] = 0;
        performanceStatsSq[i] = 0;
      }

      m_kNN = m_kNNUpper;
      Instance instance;
      Instances neighbours;
      double[] origDistances, convertedDistances;
      for (int i = 0; i < m_Train.numInstances(); i++) {
        if (m_Debug && (i % 50 == 0)) {
          System.err.print("Cross validating " + i + "/" + m_Train.numInstances() + "\r");
        }
        instance = m_Train.instance(i);
        neighbours = m_NNSearch.kNearestNeighbours(instance, m_kNN);
        origDistances = m_NNSearch.getDistances();

        for (int j = m_kNNUpper - 1; j >= 0; j--) {
          // Update the performance stats
          convertedDistances = new double[origDistances.length];
          System.arraycopy(origDistances, 0, convertedDistances, 0, origDistances.length);
          double[] distribution = makeDistribution(neighbours, convertedDistances);
          double thisPrediction = Utils.maxIndex(distribution);
          if (m_Train.classAttribute().isNumeric()) {
            thisPrediction = distribution[0];
            double err = thisPrediction - instance.classValue();
            performanceStatsSq[j] += err * err; // Squared error
            performanceStats[j] += Math.abs(err); // Absolute error
          } else {
            if (thisPrediction != instance.classValue()) {
              performanceStats[j]++; // Classification error
            }
          }
          if (j >= 1) {
            neighbours = pruneToK(neighbours, convertedDistances, j);
          }
        }
      }

      // Display the results of the cross-validation
      for (int i = 0; i < m_kNNUpper; i++) {
        if (m_Debug) {
          System.err.print("Hold-one-out performance of " + (i + 1) + " neighbors ");
        }
        if (m_Train.classAttribute().isNumeric()) {
          if (m_Debug) {
            if (m_MeanSquared) {
              System.err.println(
                  "(RMSE) = " + Math.sqrt(performanceStatsSq[i] / m_Train.numInstances()));
            } else {
              System.err.println("(MAE) = " + performanceStats[i] / m_Train.numInstances());
            }
          }
        } else {
          if (m_Debug) {
            System.err.println("(%ERR) = " + 100.0 * performanceStats[i] / m_Train.numInstances());
          }
        }
      }

      // Check through the performance stats and select the best
      // k value (or the lowest k if more than one best)
      double[] searchStats = performanceStats;
      if (m_Train.classAttribute().isNumeric() && m_MeanSquared) {
        searchStats = performanceStatsSq;
      }
      double bestPerformance = Double.NaN;
      int bestK = 1;
      for (int i = 0; i < m_kNNUpper; i++) {
        if (Double.isNaN(bestPerformance) || (bestPerformance > searchStats[i])) {
          bestPerformance = searchStats[i];
          bestK = i + 1;
        }
      }
      m_kNN = bestK;
      if (m_Debug) {
        System.err.println("Selected k = " + bestK);
      }

      m_kNNValid = true;
    } catch (Exception ex) {
      throw new Error("Couldn't optimize by cross-validation: " + ex.getMessage());
    }
  }
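This hold-one-out search is what the IBk-style lazy learner runs when cross-validation is enabled; a minimal usage sketch against the stock weka.classifiers.lazy.IBk API (the dataset path is a placeholder):

import weka.classifiers.lazy.IBk;
import weka.core.Instances;
import weka.core.converters.ConverterUtils.DataSource;

public class IBkCrossValidateDemo {
  public static void main(String[] args) throws Exception {
    Instances data = DataSource.read("path/to/dataset.arff"); // placeholder path
    data.setClassIndex(data.numAttributes() - 1);

    IBk knn = new IBk();
    knn.setKNN(10);             // largest k considered by the hold-one-out search
    knn.setCrossValidate(true); // select k by hold-one-out cross-validation
    knn.buildClassifier(data);
    // k is chosen by the hold-one-out search above before predictions are made
    System.out.println(knn);
  }
}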