Example #1
  @Override
  public double regress(DataPoint data) {
    // collect each base regressor's prediction as one meta-feature
    Vec w = new DenseVector(baseRegressors.size());
    for (int i = 0; i < baseRegressors.size(); i++)
      w.set(i, baseRegressors.get(i).regress(data));

    // the aggregating model combines the base predictions into the final output
    return aggregatingRegressor.regress(new DataPoint(w));
  }
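For context, a minimal standalone sketch of the same aggregation idea in plain Java, with made-up base predictions and meta-weights (none of these values come from the snippet):

  double[] basePredictions = {2.1, 1.9, 2.4}; // outputs of three base regressors
  double[] metaWeights = {0.5, 0.2, 0.3};     // hypothetical weights learned by the aggregating model
  double stacked = 0;
  for (int i = 0; i < basePredictions.length; i++)
    stacked += metaWeights[i] * basePredictions[i];
  // stacked plays the role of aggregatingRegressor.regress(new DataPoint(w))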
Example #2
  @Before
  public void setUp() {
    zero = new DenseVector(5);

    ones = new DenseVector(5);
    ones.mutableAdd(1.0);

    half = new DenseVector(5);
    half.mutableAdd(0.5);

    inc = new DenseVector(5);
    for (int i = 0; i < inc.length(); i++) inc.set(i, i);

    vecs = Arrays.asList(zero, ones, half, inc);
    expected =
        new double[][] {
          {0, 1, 0.5, 4},
          {1, 0, 0.5, 3},
          {0.5, 0.5, 0.0, 3.5},
          {4, 3, 3.5, 0}
        };
  }
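As a quick hand check of one entry (plain Java, not part of the test class): the Chebyshev distance is the largest absolute coordinate difference, so for zero = (0,0,0,0,0) and inc = (0,1,2,3,4) it is 4, matching expected[0][3].

  double[] zeroArr = {0, 0, 0, 0, 0};
  double[] incArr = {0, 1, 2, 3, 4};
  double d = 0;
  for (int i = 0; i < zeroArr.length; i++)
    d = Math.max(d, Math.abs(zeroArr[i] - incArr[i]));
  // d == 4.0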
Example #3
  @Override
  public void trainC(ClassificationDataSet dataSet, ExecutorService threadPool) {
    final int models = baseClassifiers.size();
    final int C = dataSet.getClassSize();
    weightsPerModel = C == 2 ? 1 : C;
    ClassificationDataSet metaSet =
        new ClassificationDataSet(
            weightsPerModel * models, new CategoricalData[0], dataSet.getPredicting());

    List<ClassificationDataSet> dataFolds = dataSet.cvSet(folds);
    // iterate in the order of the folds so we get the right datum weights
    for (ClassificationDataSet cds : dataFolds)
      for (int i = 0; i < cds.getSampleSize(); i++)
        metaSet.addDataPoint(
            new DenseVector(weightsPerModel * models),
            cds.getDataPointCategory(i),
            cds.getDataPoint(i).getWeight());

    // fill the meta training set with each base classifier's out-of-fold predictions
    for (int c = 0; c < baseClassifiers.size(); c++) {
      Classifier cl = baseClassifiers.get(c);
      int pos = 0;
      for (int f = 0; f < dataFolds.size(); f++) {
        ClassificationDataSet train = ClassificationDataSet.comineAllBut(dataFolds, f);
        ClassificationDataSet test = dataFolds.get(f);
        if (threadPool == null) cl.trainC(train);
        else cl.trainC(train, threadPool);
        // evaluate and mark each point in the held-out fold
        for (int i = 0; i < test.getSampleSize(); i++) {
          CategoricalResults pred = cl.classify(test.getDataPoint(i));
          if (C == 2)
            metaSet.getDataPoint(pos).getNumericalValues().set(c, pred.getProb(0) * 2 - 1);
          else {
            Vec toSet = metaSet.getDataPoint(pos).getNumericalValues();
            for (int j = weightsPerModel * c; j < weightsPerModel * (c + 1); j++)
              toSet.set(j, pred.getProb(j - weightsPerModel * c));
          }

          pos++;
        }
      }
    }

    // train the meta model
    if (threadPool == null) aggregatingClassifier.trainC(metaSet);
    else aggregatingClassifier.trainC(metaSet, threadPool);

    // train the final classifiers, unless folds=1. In that case they are already trained
    if (folds != 1) {
      for (Classifier cl : baseClassifiers)
        if (threadPool == null) cl.trainC(dataSet);
        else cl.trainC(dataSet, threadPool);
    }
  }
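A rough sketch of the meta-feature layout this method builds, in plain Java (M and C are hypothetical sizes, not values from the snippet): with M base classifiers and C > 2 classes, classifier c writes its class-j probability at position c * C + j of an M * C vector.

  int M = 3, C = 4; // hypothetical number of base models and classes
  double[] metaFeatures = new double[M * C];
  for (int c = 0; c < M; c++)
    for (int j = 0; j < C; j++)
      metaFeatures[c * C + j] = 1.0 / C; // stand-in for pred.getProb(j)
  // in the binary case each model instead contributes the single signed
  // score 2 * P(class 0) - 1 at position c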
Example #4
  @Override
  public CategoricalResults classify(DataPoint data) {
    Vec w = new DenseVector(weightsPerModel * baseClassifiers.size());
    if (weightsPerModel == 1) // binary case: one signed score per base model
      for (int i = 0; i < baseClassifiers.size(); i++)
        w.set(i, baseClassifiers.get(i).classify(data).getProb(0) * 2 - 1);
    else { // multiclass case: one probability per class, per base model
      for (int i = 0; i < baseClassifiers.size(); i++) {
        CategoricalResults pred = baseClassifiers.get(i).classify(data);
        for (int j = 0; j < weightsPerModel; j++) w.set(i * weightsPerModel + j, pred.getProb(j));
      }
    }

    return aggregatingClassifier.classify(new DataPoint(w));
  }
Example #5
  public void train(RegressionDataSet dataSet, ExecutorService threadPool) {
    List<Vec> inputs = new ArrayList<Vec>(dataSet.getSampleSize());
    for (int i = 0; i < dataSet.getSampleSize(); i++)
      inputs.add(dataSet.getDataPoint(i).getNumericalValues());

    coefficents = new DenseVector(dataSet.getNumNumericalVars() + 1);
    Vec targetValues = dataSet.getTargetValues();
    double minTarget = targetValues.min();
    double maxTarget = targetValues.max();
    shift = minTarget;
    scale = maxTarget - minTarget;

    // shift and rescale in place so all target values land in the range [0, 1]
    targetValues.mutableSubtract(shift);
    targetValues.mutableDivide(scale);

    Optimizer optimizer = new IterativelyReweightedLeastSquares();

    coefficents =
        optimizer.optimize(
            1e-5, 100, logitFun, logitFunD, coefficents, inputs, targetValues, threadPool);
  }
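The shift/scale bookkeeping exists so the logistic output can be mapped back to the original target range at prediction time. A minimal sketch of the transform and its inverse (all numbers here are made up):

  double minTarget = -3.0, maxTarget = 7.0;  // hypothetical target range
  double shift = minTarget, scale = maxTarget - minTarget;
  double y = 2.0;                            // a raw target value
  double yScaled = (y - shift) / scale;      // in [0, 1], what the model is fit to
  double yBack = yScaled * scale + shift;    // undo the mapping for predictions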
Example #6
  @Test
  public void testDist_Vec_Vec() {
    System.out.println("dist");

    ChebyshevDistance dist = new ChebyshevDistance();

    List<Double> cache = dist.getAccelerationCache(vecs);
    List<Double> cache2 = dist.getAccelerationCache(vecs, ex);
    if (cache != null) {
      assertEquals(cache.size(), cache2.size());
      for (int i = 0; i < cache.size(); i++) assertEquals(cache.get(i), cache2.get(i), 0.0);
      assertTrue(dist.supportsAcceleration());
    } else {
      assertNull(cache2);
      assertFalse(dist.supportsAcceleration());
    }

    try {
      dist.dist(half, new DenseVector(half.length() + 1));
      fail("Distance between vecs of different lengths should have thrown");
    } catch (Exception e) {
      // expected: mismatched dimensions are an error
    }

    for (int i = 0; i < vecs.size(); i++)
      for (int j = 0; j < vecs.size(); j++) {
        ChebyshevDistance d = dist.clone();
        assertEquals(expected[i][j], d.dist(vecs.get(i), vecs.get(j)), 1e-12);
        assertEquals(expected[i][j], d.dist(i, j, vecs, cache), 1e-12);
        assertEquals(expected[i][j], d.dist(i, vecs.get(j), vecs, cache), 1e-12);
        assertEquals(
            expected[i][j],
            d.dist(i, vecs.get(j), dist.getQueryInfo(vecs.get(j)), vecs, cache),
            1e-12);
      }
  }
Example #7
  public double f(Vec x) {
    // delegate to the double[] overload via a defensive copy of the vector
    return f(x.arrayCopy());
  }
Example #8
  @Override
  public int[] cluster(
      DataSet dataSet, int lowK, int highK, ExecutorService threadpool, int[] designations) {
    if (highK == lowK) return cluster(dataSet, lowK, threadpool, designations);
    else if (highK < lowK)
      throw new IllegalArgumentException(
          "low value of k (" + lowK + ") must not be greater than the high value of k (" + highK + ")");
    final int N = dataSet.getSampleSize();
    final int D = dataSet.getNumNumericalVars();
    fKs = new double[highK - 1]; // we HAVE to start from k=2
    fKs[0] = 1.0; // see eq(2)

    int[] bestCluster = new int[N];
    // If our low k is > 1, make fK appear infinite so the check below kicks in
    // at the first candidate k
    double minFk = lowK == 1 ? 1.0 : Double.POSITIVE_INFINITY;

    if (designations == null || designations.length < N) designations = new int[N];

    double alphaKprev = 0, S_k_prev = 0;

    // reused every iteration
    List<Vec> curMeans = new ArrayList<Vec>(highK);
    means = new ArrayList<Vec>(); // the best set of means
    // pre-compute cache instead of re-computing every time
    List<Double> accelCache = dm.getAccelerationCache(dataSet.getDataVectors(), threadpool);

    for (int k = 2; k < highK; k++) {
      curMeans.clear();
      // the k-means objective function value is the same as S_k
      double S_k = cluster(dataSet, accelCache, k, curMeans, designations, true, threadpool, true);
      // TODO could add a flag to make approximate S_k an option. Though it doesn't
      // seem to work great on toy problems, it might be fine on more realistic data

      double alpha_k;
      if (k == 2) alpha_k = 1 - 3.0 / (4 * D); // eq(3a)
      else alpha_k = alphaKprev + (1 - alphaKprev) / 6; // eq(3b)

      double fK; // eq(2)
      if (S_k_prev == 0) fKs[k - 1] = fK = 1;
      else fKs[k - 1] = fK = S_k / (alpha_k * S_k_prev);

      alphaKprev = alpha_k;
      S_k_prev = S_k;

      if (k >= lowK && minFk > fK) {
        System.arraycopy(designations, 0, bestCluster, 0, N);
        minFk = fK;
        means.clear();
        for (Vec mean : curMeans) means.add(mean.clone());
      }
    }

    // contract is we return designations with the data in it if we can, so copy the values back
    System.arraycopy(bestCluster, 0, designations, 0, N);
    return designations;
  }
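The alpha_k and fK updates match the f(K) cluster-count criterion of Pham, Dimov, and Nguyen, where f(k) = S_k / (alpha_k * S_{k-1}) and the k with the smallest f(k) wins. A standalone sketch of the weight recurrence (D and the array size are made-up values):

  int D = 10; // hypothetical dimensionality
  double[] alpha = new double[20];
  alpha[2] = 1 - 3.0 / (4 * D);                       // eq(3a)
  for (int k = 3; k < alpha.length; k++)
    alpha[k] = alpha[k - 1] + (1 - alpha[k - 1]) / 6; // eq(3b)
  // f(k) = S_k / (alpha[k] * S_{k-1}); smaller f(k) indicates a better k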
Example #9
  @Override
  public double eval(Vec a, Vec b) {
    if (a == b) // same reference means a distance of 0, and exp(0) = 1
      return 1;
    return Math.exp(-Math.pow(a.pNormDist(2, b), 2) * sigmaSqrd2Inv);
  }
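A self-contained check of the same Gaussian (RBF) kernel value, k(a, b) = exp(-||a - b||^2 / (2 * sigma^2)), on plain arrays (sigma is a made-up value):

  double sigma = 0.5;
  double sigmaSqrd2Inv = 1.0 / (2 * sigma * sigma);
  double[] a = {1.0, 2.0}, b = {1.5, 1.0};
  double distSqrd = 0;
  for (int i = 0; i < a.length; i++)
    distSqrd += (a[i] - b[i]) * (a[i] - b[i]);
  double k = Math.exp(-distSqrd * sigmaSqrd2Inv); // same value eval(a, b) would return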
Example #10
  private double logitReg(Vec input) {
    // coefficents.get(0) is the intercept; the rest pair with the input features
    double z = coefficents.get(0);
    for (int i = 1; i < coefficents.length(); i++) z += input.get(i - 1) * coefficents.get(i);
    return logit(z);
  }
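For reference, a standalone version of the same computation, assuming logit here is the logistic (sigmoid) function 1 / (1 + e^-z) as its use on [0, 1]-scaled targets suggests (the numbers are made up):

  double[] coef = {0.5, 1.0, -2.0}; // coef[0] is the bias/intercept
  double[] x = {0.3, 0.7};
  double z = coef[0];
  for (int i = 1; i < coef.length; i++)
    z += x[i - 1] * coef[i];
  double p = 1.0 / (1.0 + Math.exp(-z)); // the logistic of z, in (0, 1)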
Example #11
  @Override
  public double getBias() {
    return coefficents.get(0); // the intercept is stored at index 0
  }
Example #12
  @Override
  public Vec getRawWeight() {
    // a view of the coefficients with the intercept at index 0 excluded
    return new SubVector(1, coefficents.length() - 1, coefficents);
  }