  // Loads a small MNIST batch and reshapes the flat feature matrix into
  // NCHW layout: [examples, channels, height, width].
  public INDArray getMnistData() throws Exception {
    int inputWidth = 28;
    int inputHeight = 28;
    int nChannelsIn = 1;
    int nExamples = 5;

    DataSetIterator data = new MnistDataSetIterator(nExamples, nExamples);
    DataSet mnist = data.next();
    nExamples = mnist.numExamples();
    return mnist.getFeatureMatrix().reshape(nExamples, nChannelsIn, inputHeight, inputWidth);
  }
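
  // Sketch (added, hedged): a companion test verifying the NCHW shape produced by
  // getMnistData(); assumes JUnit 4's assertArrayEquals and the int[] shape() of
  // this ND4J era.
  @Test
  public void testMnistDataShape() throws Exception {
    INDArray mnist = getMnistData();
    // Expect [5, 1, 28, 28]: 5 examples, 1 channel, 28x28 pixels.
    assertArrayEquals(new int[] {5, 1, 28, 28}, mnist.shape());
  }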
  @Test
  public void testCNNMLN() throws Exception {
    Nd4j.ENFORCE_NUMERICAL_STABILITY = true;

    final int numRows = 28;
    final int numColumns = 28;
    int nChannels = 1;
    int outputNum = 10;
    int numSamples = 10;
    int batchSize = 10;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = iterations / 5;

    DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true);

    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .batchSize(batchSize)
            .iterations(iterations)
            .weightInit(WeightInit.XAVIER)
            .activationFunction("relu")
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list(3)
            .layer(
                0, new ConvolutionLayer.Builder(new int[] {10, 10}).nIn(nChannels).nOut(6).build())
            .layer(
                1,
                new SubsamplingLayer.Builder(SubsamplingLayer.PoolingType.MAX, new int[] {2, 2})
                    .build())
            .layer(
                2,
                new OutputLayer.Builder(LossFunctions.LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(150)
                    .nOut(outputNum)
                    .activation("softmax")
                    .build())
            .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, 1))
            .inputPreProcessor(2, new CnnToFeedForwardPreProcessor())
            .backprop(true)
            .pretrain(false)
            .build();
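    // Shape walk-through (added comment): 28x28x1 input -> 10x10 convolution with
    // 6 feature maps -> 2x2 max-pooling -> dense softmax over the 10 digit classes.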
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));
    model.fit(mnistIter);

    // fit(...) drains the iterator; reset it before drawing a batch again.
    mnistIter.reset();
    DataSet data = mnistIter.next();
  }
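The test stops after drawing a batch without asserting anything. A minimal evaluation sketch that could follow inside the test, using DL4J's standard Evaluation API (not in the original):

    Evaluation eval = new Evaluation(outputNum);
    INDArray output = model.output(data.getFeatureMatrix());
    eval.eval(data.getLabels(), output);
    System.out.println(eval.stats());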
Example #3
  public void train() {
    DataSetIterator iter = new ProfileIterator(trainingData, confirmationSet, imgDatasets, true);
    int iEpoch = 0;
    int nEpochs = 300;

    while (iEpoch < nEpochs) {
      System.out.printf("EPOCH %d\n", iEpoch);

      Evaluation eval = new Evaluation();
      while (iter.hasNext()) {
        DataSet ds = iter.next();
        net.fit(ds); // DL4J's MultiLayerNetwork trains via fit(DataSet); it has no train(...) method

        INDArray predicted = net.output(ds.getFeatureMatrix());
        INDArray labels = ds.getLabels();
        eval.evalTimeSeries(labels, predicted);
      }

      iter.reset();
      System.out.println(eval.stats()); // per-epoch evaluation summary
      iEpoch++;
    }
    System.out.println("Fitting : DONE");
  }
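Once training finishes, the network would typically be persisted; a hedged sketch using DL4J's ModelSerializer (the method, file name, and java.io.File import are additions, not part of the original):

  public void save() throws Exception {
    // true = also persist updater state so training can be resumed later
    ModelSerializer.writeModel(net, new File("profile-net.zip"), true);
  }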
Example #4
  // Convenience overload: trains on each batch the iterator yields.
  @Override
  public void fit(DataSetIterator iter) {
    while (iter.hasNext()) fit(iter.next());
  }
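For illustration, the overload above simply replaces a manual training loop; given a network exposing it, the two forms below are equivalent (model and trainIter are assumed names):

  model.fit(trainIter);                // convenience form using the overload

  trainIter.reset();                   // manual loop the overload delegates to
  while (trainIter.hasNext()) {
    model.fit(trainIter.next());
  }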
Example #5
  public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int seed = 123;
    int numSamples = MnistDataFetcher.NUM_EXAMPLES;
    int batchSize = 1000;
    int iterations = 1;
    int listenerFreq = Math.max(1, iterations / 5); // guard: with iterations = 1, iterations / 5 would be 0

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .optimizationAlgo(OptimizationAlgorithm.LINE_GRADIENT_DESCENT)
            .list()
            .layer(
                0,
                new RBM.Builder()
                    .nIn(numRows * numColumns)
                    .nOut(1000)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                1,
                new RBM.Builder()
                    .nIn(1000)
                    .nOut(500)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                2,
                new RBM.Builder()
                    .nIn(500)
                    .nOut(250)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                3,
                new RBM.Builder()
                    .nIn(250)
                    .nOut(100)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                4,
                new RBM.Builder()
                    .nIn(100)
                    .nOut(30)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build()) // encoding stops
            .layer(
                5,
                new RBM.Builder()
                    .nIn(30)
                    .nOut(100)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build()) // decoding starts
            .layer(
                6,
                new RBM.Builder()
                    .nIn(100)
                    .nOut(250)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                7,
                new RBM.Builder()
                    .nIn(250)
                    .nOut(500)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                8,
                new RBM.Builder()
                    .nIn(500)
                    .nOut(1000)
                    .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
                    .build())
            .layer(
                9,
                new OutputLayer.Builder(LossFunctions.LossFunction.RMSE_XENT)
                    .nIn(1000)
                    .nOut(numRows * numColumns)
                    .build())
            .pretrain(true)
            .backprop(true)
            .build();
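    // Added note: the stack is symmetric (784 -> 1000 -> 500 -> 250 -> 100 -> 30,
    // then back out to 784), so layer 4's 30 units form the autoencoder bottleneck.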

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();

    model.setListeners(
        Collections.singletonList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    while (iter.hasNext()) {
      DataSet next = iter.next();
      // Autoencoder training: features are used as both input and target,
      // so the net learns to reconstruct its input through the bottleneck.
      model.fit(new DataSet(next.getFeatureMatrix(), next.getFeatureMatrix()));
    }
  }
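After training, reconstruction quality can be spot-checked by feeding digits back through the network; a minimal sketch (the iterator reset and variable names are assumptions):

    iter.reset();
    DataSet sample = iter.next();
    // 784-pixel input -> 30-dim code -> 784-pixel reconstruction
    INDArray reconstruction = model.output(sample.getFeatureMatrix());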
Example #6
  @Test
  public void testTransform() {
    /* A random dataset is generated as A*X + B, where X is drawn from a normal
       distribution with mean 0 and std 1, so the dataset's mean is B and its std is A.
       The sample mean and std computed by the normalizer are compared to these
       theoretical values, and the transformed values should match X generated
       with the same seed.
    */
    long randSeed = 7139183;

    int nFeatures = 2;
    int nSamples = 6400;
    int bsize = 8;
    int a = 2;
    int b = 10;
    INDArray sampleMean, sampleStd, sampleMeanDelta, sampleStdDelta, delta, deltaPerc;
    double maxDeltaPerc, sampleMeanSEM;

    genRandomDataSet normData = new genRandomDataSet(nSamples, nFeatures, a, b, randSeed);
    genRandomDataSet expectedData = new genRandomDataSet(nSamples, nFeatures, 1, 0, randSeed);
    genRandomDataSet beforeTransformData =
        new genRandomDataSet(nSamples, nFeatures, a, b, randSeed);

    NormalizerStandardize myNormalizer = new NormalizerStandardize();
    DataSetIterator normIterator = normData.getIter(bsize);
    DataSetIterator expectedIterator = expectedData.getIter(bsize);
    DataSetIterator beforeTransformIterator = beforeTransformData.getIter(bsize);

    myNormalizer.fit(normIterator);

    double tolerancePerc = 5.0; // within 5%
    sampleMean = myNormalizer.getMean();
    sampleMeanDelta = Transforms.abs(sampleMean.sub(normData.theoreticalMean));
    assertTrue(
        sampleMeanDelta.mul(100).div(normData.theoreticalMean).max(1).getDouble(0, 0)
            < tolerancePerc);
    // sanity check to see if it's within the theoretical standard error of mean
    sampleMeanSEM = sampleMeanDelta.div(normData.theoreticalSEM).max(1).getDouble(0, 0);
    assertTrue(sampleMeanSEM < 2.6); // 99% of the time it should be within this many SEMs

    tolerancePerc = 10.0; // within 10%
    sampleStd = myNormalizer.getStd();
    sampleStdDelta = Transforms.abs(sampleStd.sub(normData.theoreticalStd));
    assertTrue(
        sampleStdDelta.div(normData.theoreticalStd).max(1).mul(100).getDouble(0, 0)
            < tolerancePerc);

    normIterator.setPreProcessor(myNormalizer);
    while (normIterator.hasNext()) {
      INDArray before = beforeTransformIterator.next().getFeatures();
      INDArray after = normIterator.next().getFeatures();
      INDArray expected = expectedIterator.next().getFeatures();
      delta = Transforms.abs(after.sub(expected));
      deltaPerc = delta.div(before.sub(expected));
      deltaPerc.muli(100);
      maxDeltaPerc = deltaPerc.max(0, 1).getDouble(0, 0);
      // System.out.println("=== BEFORE ===");
      // System.out.println(before);
      // System.out.println("=== AFTER ===");
      // System.out.println(after);
      // System.out.println("=== SHOULD BE ===");
      // System.out.println(expected);
      assertTrue(maxDeltaPerc < tolerancePerc);
    }
  }
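For reference, the production pattern this test exercises; a minimal sketch with an assumed trainIter (the statistics pass and pre-processor hookup are the standard DL4J calls used above):

    NormalizerStandardize normalizer = new NormalizerStandardize();
    normalizer.fit(trainIter);             // one pass to collect mean/std statistics
    trainIter.setPreProcessor(normalizer); // batches are now standardized on the fly
    trainIter.reset();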