@Override
  protected void _transform(Dataset data, boolean trainingMode) {
    // handle non-numeric types, extract columns to dummy variables

    Map<Object, Dataset.ColumnType> newColumns = new HashMap<>();

    int n = data.size();

    Iterator<Map.Entry<Object, Dataset.ColumnType>> it = data.getColumns().entrySet().iterator();
    while (it.hasNext()) {
      Map.Entry<Object, Dataset.ColumnType> entry = it.next();
      Object column = entry.getKey();
      Dataset.ColumnType columnType = entry.getValue();

      if (columnType == Dataset.ColumnType.CATEGORICAL
          || columnType == Dataset.ColumnType.ORDINAL) {
        // ordinal and categorical columns are converted into dummy variables

        // Remove the old column from the column map
        it.remove();

        // create dummy variables for all the levels
        for (Record r : data) {

          if (!r.getX().containsKey(column)) {
            continue; // does not contain column
          }

          Object value = r.getX().get(column);

          // remove the column from data
          r.getX().remove(column);

          List<Object> newColumn = Arrays.<Object>asList(column, value);

          // add a new boolean feature with combination of column and value
          r.getX().put(newColumn, true);

          // add the new column in the list for insertion
          newColumns.put(newColumn, Dataset.ColumnType.DUMMYVAR);
        }
      }
    }

    // add the new columns in the dataset column map
    if (!newColumns.isEmpty()) {
      data.getColumns().putAll(newColumns);
    }
  }
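  // For illustration (not part of the original class): a minimal, self-contained sketch of the
  // same dummy-variable encoding idea using plain Java collections. The map below stands in for
  // a Record's X values; the names are hypothetical and only approximate what Record/Dataset do.
  import java.util.Arrays;
  import java.util.HashMap;
  import java.util.List;
  import java.util.Map;

  public class DummyVarSketch {
    public static void main(String[] args) {
      // a record with one categorical feature: {"color" -> "red"}
      Map<Object, Object> x = new HashMap<>();
      x.put("color", "red");

      // encode: drop the categorical column and add a boolean feature keyed by [column, value]
      Object value = x.remove("color");
      List<Object> dummyColumn = Arrays.<Object>asList("color", value);
      x.put(dummyColumn, true);

      System.out.println(x); // {[color, red]=true}
    }
  }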
  @Override
  protected VM validateModel(Dataset validationData) {
    predictDataset(validationData);

    Set<Object> classesSet = knowledgeBase.getModelParameters().getClasses();

    // create new validation metrics object
    VM validationMetrics = knowledgeBase.getEmptyValidationMetricsObject();

    // short notation
    Map<List<Object>, Double> ctMap = validationMetrics.getContingencyTable();
    for (Object theClass : classesSet) {
      ctMap.put(Arrays.<Object>asList(theClass, SensitivityRates.TP), 0.0); // true positive
      ctMap.put(Arrays.<Object>asList(theClass, SensitivityRates.FP), 0.0); // false positive
      ctMap.put(Arrays.<Object>asList(theClass, SensitivityRates.TN), 0.0); // true negative
      ctMap.put(Arrays.<Object>asList(theClass, SensitivityRates.FN), 0.0); // false negative
    }

    int n = validationData.size();
    int c = classesSet.size();

    int correctCount = 0;
    for (Record r : validationData) {
      if (r.getYPredicted().equals(r.getY())) {
        ++correctCount;

        for (Object cl : classesSet) {
          if (cl.equals(r.getYPredicted())) {
            List<Object> key = Arrays.<Object>asList(cl, SensitivityRates.TP);
            ctMap.put(key, ctMap.get(key) + 1.0);
          } else {
            List<Object> key = Arrays.<Object>asList(cl, SensitivityRates.TN);
            ctMap.put(key, ctMap.get(key) + 1.0);
          }
        }
      } else {
        for (Object cl : classesSet) {
          if (cl.equals(r.getYPredicted())) {
            List<Object> key = Arrays.<Object>asList(cl, SensitivityRates.FP);
            ctMap.put(key, ctMap.get(key) + 1.0);
          } else if (cl.equals(r.getY())) {
            List<Object> key = Arrays.<Object>asList(cl, SensitivityRates.FN);
            ctMap.put(key, ctMap.get(key) + 1.0);
          } else {
            List<Object> key = Arrays.<Object>asList(cl, SensitivityRates.TN);
            ctMap.put(key, ctMap.get(key) + 1.0);
          }
        }
      }
    }

    validationMetrics.setAccuracy(correctCount / (double) n);

    // Average Precision, Recall and F1:
    // http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.104.8244&rep=rep1&type=pdf

    for (Object theClass : classesSet) {

      double tp = ctMap.get(Arrays.<Object>asList(theClass, SensitivityRates.TP));
      double fp = ctMap.get(Arrays.<Object>asList(theClass, SensitivityRates.FP));
      double fn = ctMap.get(Arrays.<Object>asList(theClass, SensitivityRates.FN));

      double classPrecision = 0.0;
      double classRecall = 0.0;
      double classF1 = 0.0;
      if (tp > 0.0) {
        classPrecision = tp / (tp + fp);
        classRecall = tp / (tp + fn);
        classF1 = 2.0 * classPrecision * classRecall / (classPrecision + classRecall);
      } else if (tp == 0.0 && fp == 0.0 && fn == 0.0) {
        // if this category did not appear in the dataset then set the metrics to 1
        classPrecision = 1.0;
        classRecall = 1.0;
        classF1 = 1.0;
      }

      validationMetrics.getMicroPrecision().put(theClass, classPrecision);
      validationMetrics.getMicroRecall().put(theClass, classRecall);
      validationMetrics.getMicroF1().put(theClass, classF1);

      validationMetrics.setMacroPrecision(
          validationMetrics.getMacroPrecision() + classPrecision / c);
      validationMetrics.setMacroRecall(validationMetrics.getMacroRecall() + classRecall / c);
      validationMetrics.setMacroF1(validationMetrics.getMacroF1() + classF1 / c);
    }

    return validationMetrics;
  }
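  // A minimal, self-contained sketch of the per-class metric formulas used above
  // (precision = TP/(TP+FP), recall = TP/(TP+FN), F1 = their harmonic mean), assuming plain
  // double counts instead of the contingency-table map. The counts are made-up example values.
  import java.util.Locale;

  public class ClassMetricsSketch {
    public static void main(String[] args) {
      double tp = 30.0, fp = 10.0, fn = 5.0; // placeholder counts for one class

      double precision = tp / (tp + fp);                           // 0.75
      double recall = tp / (tp + fn);                              // ~0.857
      double f1 = 2.0 * precision * recall / (precision + recall); // ~0.80

      System.out.printf(Locale.US, "precision=%.3f recall=%.3f f1=%.3f%n", precision, recall, f1);
    }
  }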
  @Override
  protected void estimateModelParameters(Dataset trainingData) {
    int n = trainingData.size();
    int d = trainingData.getColumnSize();

    ModelParameters modelParameters = knowledgeBase.getModelParameters();
    TrainingParameters trainingParameters = knowledgeBase.getTrainingParameters();

    modelParameters.setN(n);
    modelParameters.setD(d);

    // get model parameters
    int k = trainingParameters.getK(); // number of topics
    Map<List<Object>, Integer> topicAssignmentOfDocumentWord =
        modelParameters.getTopicAssignmentOfDocumentWord();
    Map<List<Integer>, Integer> documentTopicCounts = modelParameters.getDocumentTopicCounts();
    Map<List<Object>, Integer> topicWordCounts = modelParameters.getTopicWordCounts();
    Map<Integer, Integer> documentWordCounts = modelParameters.getDocumentWordCounts();
    Map<Integer, Integer> topicCounts = modelParameters.getTopicCounts();

    // initialize topic assignments of each word randomly and update the counters
    for (Record r : trainingData) {
      Integer documentId = r.getId();

      documentWordCounts.put(documentId, r.getX().size());

      for (Map.Entry<Object, Object> entry : r.getX().entrySet()) {
        Object wordPosition = entry.getKey();
        Object word = entry.getValue();

        // sample a topic
        Integer topic = PHPfunctions.mt_rand(0, k - 1);

        increase(topicCounts, topic);
        topicAssignmentOfDocumentWord.put(Arrays.asList(documentId, wordPosition), topic);
        increase(documentTopicCounts, Arrays.asList(documentId, topic));
        increase(topicWordCounts, Arrays.asList(topic, word));
      }
    }

    double alpha = trainingParameters.getAlpha();
    double beta = trainingParameters.getBeta();

    int maxIterations = trainingParameters.getMaxIterations();

    int iteration = 0;
    while (iteration < maxIterations) {

      if (GeneralConfiguration.DEBUG) {
        System.out.println("Iteration " + iteration);
      }

      int changedCounter = 0;
      // collapsed gibbs sampler
      for (Record r : trainingData) {
        Integer documentId = r.getId();

        AssociativeArray topicAssignments = new AssociativeArray();
        for (int j = 0; j < k; ++j) {
          topicAssignments.put(j, 0.0);
        }

        int totalWords = r.getX().size();

        for (Map.Entry<Object, Object> entry : r.getX().entrySet()) {
          Object wordPosition = entry.getKey();
          Object word = entry.getValue();

          // remove the word's current topic assignment from the counters
          Integer topic =
              topicAssignmentOfDocumentWord.get(Arrays.asList(documentId, wordPosition));
          // decrease(documentWordCounts, documentId); //slow
          decrease(topicCounts, topic);
          decrease(documentTopicCounts, Arrays.asList(documentId, topic));
          decrease(topicWordCounts, Arrays.asList(topic, word));

          // int numberOfDocumentWords = r.getX().size()-1; //fast - decreased by 1

          // compute the topic posteriors and sample from them
          AssociativeArray topicProbabilities = new AssociativeArray();
          for (int j = 0; j < k; ++j) {
            double numerator;
            Integer njw = topicWordCounts.get(Arrays.asList(j, word));
            if (njw != null) {
              numerator = njw + beta;
            } else {
              numerator = beta;
            }

            Integer njd = documentTopicCounts.get(Arrays.asList(documentId, j));
            if (njd != null) {
              numerator *= (njd + alpha);
            } else {
              numerator *= alpha;
            }

            double denominator = topicCounts.get((Integer) j) + beta * d;
            // denominator *= numberOfDocumentWords + alpha * k; // not necessary because it is
            // the same for all topics, so it can be omitted

            topicProbabilities.put(j, numerator / denominator);
          }

          // normalize probabilities
          Descriptives.normalize(topicProbabilities);

          // sample from these probabilities
          Integer newTopic =
              (Integer)
                  SRS.weightedProbabilitySampling(topicProbabilities, 1, true).iterator().next();
          topic = newTopic; // new topic assignment

          // add the word back to the counters with its new topic assignment
          topicAssignmentOfDocumentWord.put(Arrays.asList(documentId, wordPosition), topic);
          // increase(documentWordCounts, documentId); //slow
          increase(topicCounts, topic);
          increase(documentTopicCounts, Arrays.asList(documentId, topic));
          increase(topicWordCounts, Arrays.asList(topic, word));

          topicAssignments.put(
              topic, Dataset.toDouble(topicAssignments.get(topic)) + 1.0 / totalWords);
        }

        Object mainTopic = MapFunctions.selectMaxKeyValue(topicAssignments).getKey();

        if (!mainTopic.equals(r.getYPredicted())) {
          ++changedCounter;
        }
        r.setYPredicted(mainTopic);
        r.setYPredictedProbabilities(topicAssignments);
      }
      ++iteration;

      if (GeneralConfiguration.DEBUG) {
        System.out.println("Reassigned Records " + changedCounter);
      }

      if (changedCounter == 0) {
        break;
      }
    }

    modelParameters.setTotalIterations(iteration);
  }
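  // The Gibbs sampler above relies on increase(...)/decrease(...) counter helpers that are not
  // shown in this snippet. A plausible minimal version is sketched below (hypothetical: the real
  // implementations may differ, e.g. in how they handle counts that drop to zero).
  import java.util.Map;

  final class CounterSketch {
    static <K> void increase(Map<K, Integer> counts, K key) {
      Integer current = counts.get(key);
      counts.put(key, current == null ? 1 : current + 1); // start at 1 for an unseen key
    }

    static <K> void decrease(Map<K, Integer> counts, K key) {
      Integer current = counts.get(key);
      if (current != null) {
        counts.put(key, current - 1); // keep the key even at zero, matching how the maps are read back
      }
    }
  }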
  private ValidationMetrics predictAndValidate(Dataset newData) {
    // This method uses a similar approach to training, but with one important
    // difference: we do not wish to modify the original training parameters.
    // As a result we keep additional temporary counts for the testing data and
    // merge them with the parameters learned during training in order to make
    // a decision.
    ModelParameters modelParameters = knowledgeBase.getModelParameters();
    TrainingParameters trainingParameters = knowledgeBase.getTrainingParameters();

    // create new validation metrics object
    ValidationMetrics validationMetrics = knowledgeBase.getEmptyValidationMetricsObject();

    String tmpPrefix = StorageConfiguration.getTmpPrefix();

    // get model parameters
    int n = modelParameters.getN();
    int d = modelParameters.getD();
    int k = trainingParameters.getK(); // number of topics

    Map<List<Object>, Integer> topicWordCounts = modelParameters.getTopicWordCounts();
    Map<Integer, Integer> topicCounts = modelParameters.getTopicCounts();

    BigDataStructureFactory.MapType mapType = knowledgeBase.getMemoryConfiguration().getMapType();
    int LRUsize = knowledgeBase.getMemoryConfiguration().getLRUsize();
    BigDataStructureFactory bdsf = knowledgeBase.getBdsf();

    // we create temporary maps for the prediction sets to avoid modifying the maps that we have
    // already learned
    Map<List<Object>, Integer> tmp_topicAssignmentOfDocumentWord =
        bdsf.getMap(tmpPrefix + "topicAssignmentOfDocumentWord", mapType, LRUsize);
    Map<List<Integer>, Integer> tmp_documentTopicCounts =
        bdsf.getMap(tmpPrefix + "documentTopicCounts", mapType, LRUsize);
    Map<List<Object>, Integer> tmp_topicWordCounts =
        bdsf.getMap(tmpPrefix + "topicWordCounts", mapType, LRUsize);
    Map<Integer, Integer> tmp_topicCounts =
        bdsf.getMap(tmpPrefix + "topicCounts", mapType, LRUsize);

    // initialize topic assignments of each word randomly and update the counters
    for (Record r : newData) {
      Integer documentId = r.getId();

      for (Map.Entry<Object, Object> entry : r.getX().entrySet()) {
        Object wordPosition = entry.getKey();
        Object word = entry.getValue();

        // sample a topic
        Integer topic = PHPfunctions.mt_rand(0, k - 1);

        increase(tmp_topicCounts, topic);
        tmp_topicAssignmentOfDocumentWord.put(Arrays.asList(documentId, wordPosition), topic);
        increase(tmp_documentTopicCounts, Arrays.asList(documentId, topic));
        increase(tmp_topicWordCounts, Arrays.asList(topic, word));
      }
    }

    double alpha = trainingParameters.getAlpha();
    double beta = trainingParameters.getBeta();

    int maxIterations = trainingParameters.getMaxIterations();

    double perplexity = Double.MAX_VALUE;
    for (int iteration = 0; iteration < maxIterations; ++iteration) {

      if (GeneralConfiguration.DEBUG) {
        System.out.println("Iteration " + iteration);
      }

      // collapsed gibbs sampler
      int changedCounter = 0;
      perplexity = 0.0;
      double totalDatasetWords = 0.0;
      for (Record r : newData) {
        Integer documentId = r.getId();

        AssociativeArray topicAssignments = new AssociativeArray();
        for (int j = 0; j < k; ++j) {
          topicAssignments.put(j, 0.0);
        }

        int totalDocumentWords = r.getX().size();
        totalDatasetWords += totalDocumentWords;
        for (Map.Entry<Object, Object> entry : r.getX().entrySet()) {
          Object wordPosition = entry.getKey();
          Object word = entry.getValue();

          // remove the word's current topic assignment from the counters
          Integer topic =
              tmp_topicAssignmentOfDocumentWord.get(Arrays.asList(documentId, wordPosition));
          decrease(tmp_topicCounts, topic);
          decrease(tmp_documentTopicCounts, Arrays.asList(documentId, topic));
          decrease(tmp_topicWordCounts, Arrays.asList(topic, word));

          int numberOfDocumentWords = r.getX().size() - 1;

          // compute the topic posteriors and sample from them
          AssociativeArray topicProbabilities = new AssociativeArray();
          for (int j = 0; j < k; ++j) {
            double numerator;

            // get the counts from the current testing data
            List<Object> topicWordKey = Arrays.asList(j, word);
            Integer njw = tmp_topicWordCounts.get(topicWordKey);
            if (njw != null) {
              numerator = njw + beta;
            } else {
              numerator = beta;
            }

            // also get the counts from the training data
            Integer njw_original = topicWordCounts.get(topicWordKey);
            if (njw_original != null) {
              numerator += njw_original;
            }

            Integer njd = tmp_documentTopicCounts.get(Arrays.asList(documentId, j));
            if (njd != null) {
              numerator *= (njd + alpha);
            } else {
              numerator *= alpha;
            }

            // add the counts from the testing data...
            double denominator = tmp_topicCounts.get((Integer) j) + beta * d - 1;
            // ...and the ones from the training data
            denominator += topicCounts.get((Integer) j);
            denominator *= numberOfDocumentWords + alpha * k;

            topicProbabilities.put(j, numerator / denominator);
          }

          perplexity += Math.log(Descriptives.sum(topicProbabilities.toFlatDataCollection()));

          // normalize probabilities
          Descriptives.normalize(topicProbabilities);

          // sample from these probabilities
          Integer newTopic =
              (Integer)
                  SRS.weightedProbabilitySampling(topicProbabilities, 1, true).iterator().next();
          topic = newTopic; // new topic assignment

          // add the word back to the counters with its new topic assignment
          tmp_topicAssignmentOfDocumentWord.put(Arrays.asList(documentId, wordPosition), topic);
          increase(tmp_topicCounts, topic);
          increase(tmp_documentTopicCounts, Arrays.asList(documentId, topic));
          increase(tmp_topicWordCounts, Arrays.asList(topic, word));

          topicAssignments.put(
              topic, Dataset.toDouble(topicAssignments.get(topic)) + 1.0 / totalDocumentWords);
        }

        Object mainTopic = MapFunctions.selectMaxKeyValue(topicAssignments).getKey();

        if (!mainTopic.equals(r.getYPredicted())) {
          ++changedCounter;
        }
        r.setYPredicted(mainTopic);
        r.setYPredictedProbabilities(topicAssignments);
      }

      perplexity = Math.exp(-perplexity / totalDatasetWords);

      if (GeneralConfiguration.DEBUG) {
        System.out.println("Reassigned Records " + changedCounter + " - Perplexity: " + perplexity);
      }

      if (changedCounter == 0) {
        break;
      }
    }

    // Drop the temporary collections
    bdsf.dropTable(tmpPrefix + "topicAssignmentOfDocumentWord", tmp_topicAssignmentOfDocumentWord);
    bdsf.dropTable(tmpPrefix + "documentTopicCounts", tmp_documentTopicCounts);
    bdsf.dropTable(tmpPrefix + "topicWordCounts", tmp_topicWordCounts);
    bdsf.dropTable(tmpPrefix + "topicCounts", tmp_topicCounts);

    validationMetrics.setPerplexity(perplexity);

    return validationMetrics;
  }
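  // A minimal sketch of the perplexity aggregation used above: for every word we add the log of
  // the (unnormalized) topic-probability mass, then take exp(-logSum / totalWords). The per-word
  // probabilities below are hard-coded placeholders, not values produced by a real model.
  public class PerplexitySketch {
    public static void main(String[] args) {
      double[] perWordProbability = {0.12, 0.08, 0.20, 0.05}; // placeholder p(w) values

      double logSum = 0.0;
      for (double p : perWordProbability) {
        logSum += Math.log(p);
      }

      double perplexity = Math.exp(-logSum / perWordProbability.length);
      System.out.println("Perplexity: " + perplexity); // lower is better
    }
  }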
  /**
   * Calculates the score of the Chi-square test of independence on a contingency table.
   *
   * @param dataTable the contingency table of observed counts
   * @return an AssociativeArray containing the number of columns (k), the number of rows (n) and the Chi-square score
   * @throws IllegalArgumentException if the provided DataTable2D is not valid
   */
  public static AssociativeArray getScore(DataTable2D dataTable) throws IllegalArgumentException {
    if (!dataTable.isValid()) {
      throw new IllegalArgumentException("The provided DataTable2D is not valid.");
    }
    // Compute the marginal sums and the grand total
    Map<Object, Double> XdotJ = new HashMap<>();
    Map<Object, Double> XIdot = new HashMap<>();
    double Xdotdot = 0.0;

    for (Map.Entry<Object, AssociativeArray> entry1 : dataTable.entrySet()) {
      Object i = entry1.getKey();
      AssociativeArray row = entry1.getValue();

      for (Map.Entry<Object, Object> entry2 : row.entrySet()) {
        Object j = entry2.getKey();
        Object value = entry2.getValue();

        double v = Dataset.toDouble(value);

        // Summing the columns
        if (!XdotJ.containsKey(j)) {
          XdotJ.put(j, v);
        } else {
          XdotJ.put(j, XdotJ.get(j) + v);
        }

        // Summing the rows
        if (!XIdot.containsKey(i)) {
          XIdot.put(i, v);
        } else {
          XIdot.put(i, XIdot.get(i) + v);
        }

        Xdotdot += v;
      }
    }

    int k = XdotJ.size();
    int n = XIdot.size();

    // Calculate the Chi-square score
    double ChisquareScore = 0.0;
    if (k == 2 && n == 2) {
      // if the table is 2x2 then perform the Yates continuity correction.
      // Make this check outside the loops to keep them fast.
      for (Map.Entry<Object, AssociativeArray> entry1 : dataTable.entrySet()) {
        Object i = entry1.getKey();
        AssociativeArray row = entry1.getValue();

        for (Map.Entry<Object, Object> entry2 : row.entrySet()) {
          Object j = entry2.getKey();
          Object value = entry2.getValue();

          double v = Dataset.toDouble(value);

          // expected value under null hypothesis
          double eij = XIdot.get(i) * XdotJ.get(j) / Xdotdot;
          if (eij == 0) {
            continue;
          }
          ChisquareScore += Math.pow((Math.abs(v - eij) - 0.5), 2) / eij;
        }
      }
    } else {
      for (Map.Entry<Object, AssociativeArray> entry1 : dataTable.entrySet()) {
        Object i = entry1.getKey();
        AssociativeArray row = entry1.getValue();

        for (Map.Entry<Object, Object> entry2 : row.entrySet()) {
          Object j = entry2.getKey();
          Object value = entry2.getValue();

          double v = Dataset.toDouble(value);

          // expected value under null hypothesis
          double eij = XIdot.get(i) * XdotJ.get(j) / Xdotdot;

          ChisquareScore += Math.pow((v - eij), 2) / eij;
        }
      }
    }
    XdotJ = null;
    XIdot = null;

    AssociativeArray result = new AssociativeArray();
    result.put("k", k);
    result.put("n", n);
    result.put("score", ChisquareScore);

    return result;
  }
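  // A worked, self-contained example of the Chi-square computation above for a hard-coded 2x2
  // table (so the Yates continuity correction branch applies). Plain arrays are used instead of
  // DataTable2D to keep the sketch independent of the framework types; the counts are illustrative.
  public class ChisquareSketch {
    public static void main(String[] args) {
      double[][] observed = {{20.0, 30.0}, {25.0, 25.0}};

      // marginal sums and grand total
      double[] rowSums = new double[2];
      double[] colSums = new double[2];
      double total = 0.0;
      for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 2; ++j) {
          rowSums[i] += observed[i][j];
          colSums[j] += observed[i][j];
          total += observed[i][j];
        }
      }

      // Yates-corrected Chi-square statistic
      double score = 0.0;
      for (int i = 0; i < 2; ++i) {
        for (int j = 0; j < 2; ++j) {
          double expected = rowSums[i] * colSums[j] / total; // expected count under independence
          score += Math.pow(Math.abs(observed[i][j] - expected) - 0.5, 2) / expected;
        }
      }

      System.out.println("Chi-square score: " + score);
    }
  }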