  // Runs the network on 'data' and scores the output; nIn, data, reshapeInput(...)
  // and log are members of the enclosing class.
  private Evaluation eval(MultiLayerNetwork network) {
    Evaluation ev = new Evaluation(nIn);
    INDArray predict = network.output(reshapeInput(data));
    ev.eval(data, predict); // 'data' serves as the label matrix here
    log.info(ev.stats());
    return ev;
  }
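
  // A minimal sketch (an assumption, not the actual helper) of what the
  // reshapeInput(...) call above might do; the real implementation is defined
  // elsewhere in this class:
  //
  //   private INDArray reshapeInput(INDArray in) {
  //     // hypothetical: flatten each example to a row of nIn features
  //     return in.reshape(in.size(0), nIn);
  //   }

  // Trains a small feed-forward classifier (4-3-2-3) on Iris via backprop and
  // evaluates it on the held-out split.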
  @Test
  public void testBackProp() {
    Nd4j.getRandom().setSeed(123);
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .iterations(10)
            .weightInit(WeightInit.XAVIER)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .nIn(4)
            .nOut(3)
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer())
            .list(3)
            .backward(true)
            .pretrain(false)
            .hiddenLayerSizes(new int[] {3, 2})
            .override(
                2,
                new ConfOverride() {
                  @Override
                  public void overrideLayer(int i, NeuralNetConfiguration.Builder builder) {
                    builder.activationFunction("softmax");
                    builder.layer(new org.deeplearning4j.nn.conf.layers.OutputLayer());
                    builder.lossFunction(LossFunctions.LossFunction.MCXENT);
                  }
                })
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(Lists.<IterationListener>newArrayList(new ScoreIterationListener(1)));

    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    DataSet next = iter.next();
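    // standardize features, then split into 110 training / 40 test examples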
    next.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    network.setInput(trainTest.getTrain().getFeatureMatrix());
    network.setLabels(trainTest.getTrain().getLabels());
    network.fit(trainTest.getTrain());

    DataSet test = trainTest.getTest();
    Evaluation eval = new Evaluation(3); // 3 Iris classes
    INDArray output = network.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }
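
  // Entry point: trains a softmax-regression model on MNIST and evaluates it
  // on 10,000 MNIST examples.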
  public static void main(String[] args) throws Exception {
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10;
    int numSamples = 60000;
    int batchSize = 100;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = batchSize / 5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerNetwork model = softMaxRegression(seed, iterations, numRows, numColumns, outputNum);
    // Alternative models:
    // MultiLayerNetwork model = deepBeliefNetwork(seed, iterations, numRows, numColumns, outputNum);
    // MultiLayerNetwork model = deepConvNetwork(seed, iterations, numRows, numColumns, outputNum);

    model.init();
    model.setListeners(
        Collections.singletonList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(iter); // train the model end to end

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);

    DataSetIterator testIter = new MnistDataSetIterator(100, 10000); // 10,000 examples in batches of 100
    while (testIter.hasNext()) {
      DataSet testMnist = testIter.next();
      INDArray predict2 = model.output(testMnist.getFeatureMatrix());
      eval.eval(testMnist.getLabels(), predict2);
    }

    log.info(eval.stats());
    log.info("****************Example finished********************");
  }
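
  // Entry point: trains a small convolutional network on Iris, treating the 4
  // features as a 2x2 single-channel image, and evaluates on the held-out split.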
  public static void main(String[] args) {

    final int numRows = 2;
    final int numColumns = 2;
    int nChannels = 1;
    int outputNum = 3;
    int numSamples = 150;
    int batchSize = 110;
    int iterations = 10;
    int splitTrainNum = 100;
    int seed = 123;
    int listenerFreq = 1;

    // Set up a neural network configuration with multiple layers (built below).
    log.info("Load data....");
    DataSetIterator irisIter = new IrisDataSetIterator(batchSize, numSamples);
    DataSet iris = irisIter.next();
    iris.normalizeZeroMeanZeroUnitVariance();

    SplitTestAndTrain trainTest = iris.splitTestAndTrain(splitTrainNum, new Random(seed)); // 100 train / 50 test

    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .batchSize(batchSize)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .constrainGradientToUnitNorm(true)
            .l2(2e-4)
            .regularization(true)
            .useDropConnect(true)
            .list(2)
            .layer(
                0,
                new ConvolutionLayer.Builder(new int[] {1, 1})
                    .nIn(nChannels)
                    .nOut(6)
                    .dropOut(0.5)
                    .activation("relu")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(6)
                    .nOut(outputNum)
                    .weightInit(WeightInit.XAVIER)
                    .activation("softmax")
                    .build())
            .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
            .inputPreProcessor(1, new CnnToFeedForwardPreProcessor())
            .backprop(true)
            .pretrain(false)
            .build();

    log.info("Build model....");
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(trainTest.getTrain());

    log.info("Evaluate weights....");
    for (org.deeplearning4j.nn.api.Layer layer : model.getLayers()) {
      INDArray w = layer.getParam(DefaultParamInitializer.WEIGHT_KEY);
      log.info("Weights: " + w);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    INDArray output = model.output(trainTest.getTest().getFeatureMatrix());
    eval.eval(trainTest.getTest().getLabels(), output);
    log.info(eval.stats());

    log.info("****************Example finished********************");
  }
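
  // Trains a two-layer MLP (784-1000-10) on a 10,000-example MNIST subset,
  // holding out 20% of each batch for evaluation after training.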
  public void trainMLP() throws Exception {
    Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10;
    int numSamples = 10000;
    int batchSize = 500;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = iterations / 5;
    int splitTrainNum = (int) (batchSize * .8);
    DataSet mnist;
    SplitTestAndTrain trainTest;
    DataSet trainInput;
    List<INDArray> testInput = new ArrayList<>();
    List<INDArray> testLabels = new ArrayList<>();

    log.info("Load data....");
    DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(iterations)
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .learningRate(1e-1f)
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .useDropConnect(true)
            .list(2)
            .layer(
                0,
                new DenseLayer.Builder()
                    .nIn(numRows * numColumns)
                    .nOut(1000)
                    .activation("relu")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(1000)
                    .nOut(outputNum)
                    .activation("softmax")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    while (mnistIter.hasNext()) {
      mnist = mnistIter.next();
      trainTest =
          mnist.splitTestAndTrain(splitTrainNum, new Random(seed)); // 80/20 split of each batch
      trainInput = trainTest.getTrain(); // features and labels for training
      testInput.add(trainTest.getTest().getFeatureMatrix());
      testLabels.add(trainTest.getTest().getLabels());
      model.fit(trainInput);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    for (int i = 0; i < testInput.size(); i++) {
      INDArray output = model.output(testInput.get(i));
      eval.eval(testLabels.get(i), output);
    }

    log.info(eval.stats());
    log.info("****************Example finished********************");
  }
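
  // Builds a two-layer DBN-style configuration (Gaussian-visible, rectified-hidden
  // RBM with a classifier override on the output layer), fits it on Iris, and
  // evaluates on the held-out split.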
  @Test
  public void testDbn() throws Exception {
    Nd4j.MAX_SLICES_TO_PRINT = -1;
    Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .iterations(100)
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .momentum(0.9)
            .optimizationAlgo(OptimizationAlgorithm.LBFGS)
            .constrainGradientToUnitNorm(true)
            .k(1)
            .regularization(true)
            .l2(2e-4)
            .visibleUnit(org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
            .hiddenUnit(org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.RECTIFIED)
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .nIn(4)
            .nOut(3)
            .list(2)
            .hiddenLayerSizes(3)
            .override(1, new ClassifierOverride(1))
            .build();

    // A separate single-layer RBM configuration with sparsity; used only to
    // demonstrate standalone layer construction, and not wired into the network
    // trained below.
    NeuralNetConfiguration conf2 =
        new NeuralNetConfiguration.Builder()
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .nIn(784)
            .nOut(600)
            .applySparsity(true)
            .sparsity(0.1)
            .build();

    Layer l =
        LayerFactories.getFactory(conf2)
            .create(conf2, Arrays.<IterationListener>asList(new ScoreIterationListener(2)), 0);

    MultiLayerNetwork d = new MultiLayerNetwork(conf);
    d.init();

    DataSetIterator iter = new IrisDataSetIterator(150, 150); // all 150 Iris examples in one batch

    DataSet next = iter.next();

    Nd4j.writeTxt(next.getFeatureMatrix(), "iris.txt", "\t"); // dump raw features for inspection

    next.normalizeZeroMeanZeroUnitVariance();

    SplitTestAndTrain testAndTrain = next.splitTestAndTrain(110);
    DataSet train = testAndTrain.getTrain();

    d.fit(train);

    DataSet test = testAndTrain.getTest();

    Evaluation eval = new Evaluation(3); // 3 Iris classes
    INDArray output = d.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }