@Test
  public void testDbnFaces() {
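    // Load a batch of LFW face images and normalize to zero mean, unit variance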
    DataSetIterator iter = new LFWDataSetIterator(28, 28);

    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();

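    // DBN configuration: three Gaussian-visible / rectified-hidden RBM layers (600, 250, 100 units)
    // plus a classifier output layer (layer 3 overridden), trained with conjugate gradient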
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .nIn(next.numInputs())
            .nOut(next.numOutcomes())
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .constrainGradientToUnitNorm(true)
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new NormalDistribution(0, 1e-5))
            .iterations(10)
            .learningRate(1e-3)
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .visibleUnit(RBM.VisibleUnit.GAUSSIAN)
            .hiddenUnit(RBM.HiddenUnit.RECTIFIED)
            .layer(new RBM())
            .list(4)
            .hiddenLayerSizes(600, 250, 100)
            .override(3, new ClassifierOverride())
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(
        Arrays.<IterationListener>asList(
            new ScoreIterationListener(10), new NeuralNetPlotterIterationListener(1)));
    network.fit(next);
  }
  public static void main(String[] args) throws Exception {
    // mini-batches of 10 examples, 300 examples total
    DataSetIterator iter = new MnistDataSetIterator(10, 300);

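    // Load a previously trained DBN from the file given as the first program argument
    // and set each layer's render frequency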
    DBN dbn = SerializationUtils.readObject(new File(args[0]));
    for (int i = 0; i < dbn.getnLayers(); i++) dbn.getLayers()[i].setRenderEpochs(10);

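    // Build a deep autoencoder from the pretrained DBN and configure its code-layer, output, and unit settings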
    DeepAutoEncoder encoder = new DeepAutoEncoder(dbn);
    encoder.setRoundCodeLayerInput(true);
    encoder.setNormalizeCodeLayerOutput(false);
    encoder.setOutputLayerLossFunction(OutputLayer.LossFunction.RMSE_XENT);
    encoder.setOutputLayerActivation(Activations.sigmoid());
    encoder.setVisibleUnit(RBM.VisibleUnit.GAUSSIAN);
    encoder.setHiddenUnit(RBM.HiddenUnit.BINARY);

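    // Fine-tune the autoencoder on each mini-batch and visualize weights and reconstructions as training proceeds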
    while (iter.hasNext()) {
      DataSet next = iter.next();
      if (next == null) break;
      log.info("Training on " + next.numExamples());
      log.info("Coding layer is " + encoder.encode(next.getFirst()));
      encoder.finetune(next.getFirst(), 1e-1, 1000);
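      // Feed the encoded batch through the decoder, then plot a histogram of each decoder layer's weights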
      NeuralNetPlotter plotter = new NeuralNetPlotter();
      encoder.getDecoder().feedForward(encoder.encodeWithScaling(next.getFirst()));
      String[] layers = new String[encoder.getDecoder().getnLayers()];
      DoubleMatrix[] weights = new DoubleMatrix[layers.length];
      for (int i = 0; i < encoder.getDecoder().getnLayers(); i++) {
        layers[i] = "" + i;
        weights[i] = encoder.getDecoder().getLayers()[i].getW();
      }

      plotter.histogram(layers, weights);

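      // Render the decoder's output-layer weights as 28x28 filter images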
      FilterRenderer f = new FilterRenderer();
      f.renderFilters(
          encoder.getDecoder().getOutputLayer().getW(),
          "outputlayer.png",
          28,
          28,
          next.numExamples());
      DeepAutoEncoderDataSetReconstructionRender render =
          new DeepAutoEncoderDataSetReconstructionRender(
              next.iterator(next.numExamples()), encoder);
      render.draw();
    }

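    // Save the trained autoencoder, then render reconstructions over the full (reset) data set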
    SerializationUtils.saveObject(encoder, new File("deepautoencoder.ser"));

    iter.reset();

    DeepAutoEncoderDataSetReconstructionRender render =
        new DeepAutoEncoderDataSetReconstructionRender(iter, encoder);
    render.draw();
  }
  @Test
  public void testBackProp() {
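    // Train a small feed-forward network with backprop (no pretraining) on Iris and evaluate on a held-out split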
    Nd4j.getRandom().setSeed(123);
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .iterations(10)
            .weightInit(WeightInit.XAVIER)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .nIn(4)
            .nOut(3)
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer())
            .list(3)
            .backward(true)
            .pretrain(false)
            .hiddenLayerSizes(new int[] {3, 2})
            .override(
                2,
                new ConfOverride() {
                  @Override
                  public void overrideLayer(int i, NeuralNetConfiguration.Builder builder) {
                    builder.activationFunction("softmax");
                    builder.layer(new org.deeplearning4j.nn.conf.layers.OutputLayer());
                    builder.lossFunction(LossFunctions.LossFunction.MCXENT);
                  }
                })
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(Lists.<IterationListener>newArrayList(new ScoreIterationListener(1)));

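    // Load all 150 Iris examples in one batch, normalize, and train on 110 of them (40 held out for evaluation)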
    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    network.setInput(trainTest.getTrain().getFeatureMatrix());
    network.setLabels(trainTest.getTrain().getLabels());
    network.init();
    network.fit(trainTest.getTrain());

    DataSet test = trainTest.getTest();
    Evaluation eval = new Evaluation();
    INDArray output = network.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }
  @Test
  public void testWriteRead() {
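    // Round-trip an Iris DataSet through serialization and verify that features and labels are preserved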
    DataSetIterator iter = new IrisDataSetIterator(150, 150);
    String irisData = "irisData.dat";

    DataSet freshDataSet = iter.next(150);

    SerializationUtils.saveObject(freshDataSet, new File(irisData));

    DataSet readDataSet = SerializationUtils.readObject(new File(irisData));

    assertEquals(freshDataSet.getFeatureMatrix(), readDataSet.getFeatureMatrix());
    assertEquals(freshDataSet.getLabels(), readDataSet.getLabels());
    try {
      FileUtils.forceDelete(new File(irisData));
    } catch (IOException e) {
      e.printStackTrace();
    }
  }
  public static void main(String[] args) throws Exception {
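    // Hyperparameters: 28x28 MNIST inputs, 10 classes, mini-batches of 100 over 60,000 examples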
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10;
    int numSamples = 60000;
    int batchSize = 100;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = batchSize / 5;

    log.info("Load data....");
    DataSetIterator iter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerNetwork model = softMaxRegression(seed, iterations, numRows, numColumns, outputNum);
    // Alternative models (commented out):
    // MultiLayerNetwork model = deepBeliefNetwork(seed, iterations, numRows, numColumns, outputNum);
    // MultiLayerNetwork model = deepConvNetwork(seed, iterations, numRows, numColumns, outputNum);

    model.init();
    model.setListeners(
        Collections.singletonList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(iter); // train the model on the full training iterator

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);

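    // Evaluate on 10,000 MNIST examples in mini-batches of 100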
    DataSetIterator testIter = new MnistDataSetIterator(100, 10000);
    while (testIter.hasNext()) {
      DataSet testMnist = testIter.next();
      INDArray predict2 = model.output(testMnist.getFeatureMatrix());
      eval.eval(testMnist.getLabels(), predict2);
    }

    log.info(eval.stats());
    log.info("****************Example finished********************");
  }
  public static void main(String[] args) throws Exception {

    // Set up network. 784 in/out (as MNIST images are 28x28).
    // 784 -> 250 -> 10 -> 250 -> 784
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(12345)
            .iterations(1)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .learningRate(0.05)
            .l2(0.001)
            .list(4)
            .layer(
                0,
                new DenseLayer.Builder()
                    .nIn(784)
                    .nOut(250)
                    .weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD)
                    .activation("relu")
                    .build())
            .layer(
                1,
                new DenseLayer.Builder()
                    .nIn(250)
                    .nOut(10)
                    .weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD)
                    .activation("relu")
                    .build())
            .layer(
                2,
                new DenseLayer.Builder()
                    .nIn(10)
                    .nOut(250)
                    .weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD)
                    .activation("relu")
                    .build())
            .layer(
                3,
                new OutputLayer.Builder()
                    .nIn(250)
                    .nOut(784)
                    .weightInit(WeightInit.XAVIER)
                    .updater(Updater.ADAGRAD)
                    .activation("relu")
                    .lossFunction(LossFunctions.LossFunction.MSE)
                    .build())
            .pretrain(false)
            .backprop(true)
            .build();

    MultiLayerNetwork net = new MultiLayerNetwork(conf);
    net.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(1)));

    // Load data and split into training and testing sets. 40000 train, 10000 test
    DataSetIterator iter = new MnistDataSetIterator(100, 50000, false);

    List<INDArray> featuresTrain = new ArrayList<>();
    List<INDArray> featuresTest = new ArrayList<>();
    List<INDArray> labelsTest = new ArrayList<>();

    Random r = new Random(12345);
    while (iter.hasNext()) {
      DataSet ds = iter.next();
      SplitTestAndTrain split = ds.splitTestAndTrain(80, r); // 80/20 split (from miniBatch = 100)
      featuresTrain.add(split.getTrain().getFeatureMatrix());
      DataSet dsTest = split.getTest();
      featuresTest.add(dsTest.getFeatureMatrix());
      INDArray indexes =
          Nd4j.argMax(dsTest.getLabels(), 1); // Convert from one-hot representation -> index
      labelsTest.add(indexes);
    }

    // Train model:
    int nEpochs = 30;
    for (int epoch = 0; epoch < nEpochs; epoch++) {
      for (INDArray data : featuresTrain) {
        net.fit(data, data);
      }
      System.out.println("Epoch " + epoch + " complete");
    }

    // Evaluate the model on test data
    // Score each digit/example in test set separately
    // Then add triple (score, digit, and INDArray data) to lists and sort by score
    // This allows us to get best N and worst N digits for each type
    Map<Integer, List<Triple<Double, Integer, INDArray>>> listsByDigit = new HashMap<>();
    for (int i = 0; i < 10; i++)
      listsByDigit.put(i, new ArrayList<Triple<Double, Integer, INDArray>>());

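    // Score each test example individually; the autoencoder's score on (example, example) is its reconstruction error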
    int count = 0;
    for (int i = 0; i < featuresTest.size(); i++) {
      INDArray testData = featuresTest.get(i);
      INDArray labels = labelsTest.get(i);
      int nRows = testData.rows();
      for (int j = 0; j < nRows; j++) {
        INDArray example = testData.getRow(j);
        int label = (int) labels.getDouble(j);
        double score = net.score(new DataSet(example, example));
        listsByDigit.get(label).add(new ImmutableTriple<>(score, count++, example));
      }
    }

    // Sort data by score, separately for each digit
    Comparator<Triple<Double, Integer, INDArray>> c =
        new Comparator<Triple<Double, Integer, INDArray>>() {
          @Override
          public int compare(
              Triple<Double, Integer, INDArray> o1, Triple<Double, Integer, INDArray> o2) {
            return Double.compare(o1.getLeft(), o2.getLeft());
          }
        };

    for (List<Triple<Double, Integer, INDArray>> list : listsByDigit.values()) {
      Collections.sort(list, c);
    }

    // Select the 5 best and 5 worst numbers (by reconstruction error) for each digit
    List<INDArray> best = new ArrayList<>(50);
    List<INDArray> worst = new ArrayList<>(50);
    for (int i = 0; i < 10; i++) {
      List<Triple<Double, Integer, INDArray>> list = listsByDigit.get(i);
      for (int j = 0; j < 5; j++) {
        best.add(list.get(j).getRight());
        worst.add(list.get(list.size() - j - 1).getRight());
      }
    }

    // Visualize the best and worst digits
    MNISTVisualizer bestVisualizer = new MNISTVisualizer(2.0, best, "Best (Low Rec. Error)");
    bestVisualizer.visualize();

    MNISTVisualizer worstVisualizer = new MNISTVisualizer(2.0, worst, "Worst (High Rec. Error)");
    worstVisualizer.visualize();
  }
  public static void main(String[] args) {

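    // Toy CNN on Iris: the 4 features are treated as a 2x2 single-channel "image", with 3 output classes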
    final int numRows = 2;
    final int numColumns = 2;
    int nChannels = 1;
    int outputNum = 3;
    int numSamples = 150;
    int batchSize = 110;
    int iterations = 10;
    int splitTrainNum = 100;
    int seed = 123;
    int listenerFreq = 1;

    // Set up a neural network configuration with multiple layers
    log.info("Load data....");
    DataSetIterator irisIter = new IrisDataSetIterator(batchSize, numSamples);
    DataSet iris = irisIter.next();
    iris.normalizeZeroMeanZeroUnitVariance();

    SplitTestAndTrain trainTest = iris.splitTestAndTrain(splitTrainNum, new Random(seed));

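    // One 1x1 convolution layer (6 feature maps, relu, 50% dropout) into a softmax output layer;
    // the input preprocessors reshape activations between flat and CNN formats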
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .iterations(iterations)
            .batchSize(batchSize)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .constrainGradientToUnitNorm(true)
            .l2(2e-4)
            .regularization(true)
            .useDropConnect(true)
            .list(2)
            .layer(
                0,
                new ConvolutionLayer.Builder(new int[] {1, 1})
                    .nIn(nChannels)
                    .nOut(6)
                    .dropOut(0.5)
                    .activation("relu")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunctions.LossFunction.MCXENT)
                    .nIn(6)
                    .nOut(outputNum)
                    .weightInit(WeightInit.XAVIER)
                    .activation("softmax")
                    .build())
            .inputPreProcessor(0, new FeedForwardToCnnPreProcessor(numRows, numColumns, nChannels))
            .inputPreProcessor(1, new CnnToFeedForwardPreProcessor())
            .backprop(true)
            .pretrain(false)
            .build();

    log.info("Build model....");
    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    model.fit(trainTest.getTrain());

    log.info("Evaluate weights....");
    for (org.deeplearning4j.nn.api.Layer layer : model.getLayers()) {
      INDArray w = layer.getParam(DefaultParamInitializer.WEIGHT_KEY);
      log.info("Weights: " + w);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    INDArray output = model.output(trainTest.getTest().getFeatureMatrix());
    eval.eval(trainTest.getTest().getLabels(), output);
    log.info(eval.stats());

    log.info("****************Example finished********************");
  }
  public void trainMLP() throws Exception {
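    // One-hidden-layer MLP (1000 relu units, softmax output) trained on a 10,000-example MNIST subset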
    Nd4j.ENFORCE_NUMERICAL_STABILITY = true;
    final int numRows = 28;
    final int numColumns = 28;
    int outputNum = 10;
    int numSamples = 10000;
    int batchSize = 500;
    int iterations = 10;
    int seed = 123;
    int listenerFreq = iterations / 5;
    int splitTrainNum = (int) (batchSize * .8);
    DataSet mnist;
    SplitTestAndTrain trainTest;
    DataSet trainInput;
    List<INDArray> testInput = new ArrayList<>();
    List<INDArray> testLabels = new ArrayList<>();

    log.info("Load data....");
    DataSetIterator mnistIter = new MnistDataSetIterator(batchSize, numSamples, true);

    log.info("Build model....");
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .seed(seed)
            .optimizationAlgo(OptimizationAlgorithm.STOCHASTIC_GRADIENT_DESCENT)
            .iterations(iterations)
            .gradientNormalization(GradientNormalization.RenormalizeL2PerLayer)
            .learningRate(1e-1f)
            .momentum(0.5)
            .momentumAfter(Collections.singletonMap(3, 0.9))
            .useDropConnect(true)
            .list(2)
            .layer(
                0,
                new DenseLayer.Builder()
                    .nIn(numRows * numColumns)
                    .nOut(1000)
                    .activation("relu")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunction.NEGATIVELOGLIKELIHOOD)
                    .nIn(1000)
                    .nOut(outputNum)
                    .activation("softmax")
                    .weightInit(WeightInit.XAVIER)
                    .build())
            .build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setListeners(Arrays.asList((IterationListener) new ScoreIterationListener(listenerFreq)));

    log.info("Train model....");
    while (mnistIter.hasNext()) {
      mnist = mnistIter.next();
      trainTest =
          mnist.splitTestAndTrain(splitTrainNum, new Random(seed)); // 80/20 split of each mini-batch
      trainInput = trainTest.getTrain(); // get feature matrix and labels for training
      testInput.add(trainTest.getTest().getFeatureMatrix());
      testLabels.add(trainTest.getTest().getLabels());
      model.fit(trainInput);
    }

    log.info("Evaluate model....");
    Evaluation eval = new Evaluation(outputNum);
    for (int i = 0; i < testInput.size(); i++) {
      INDArray output = model.output(testInput.get(i));
      eval.eval(testLabels.get(i), output);
    }

    log.info(eval.stats());
    log.info("****************Example finished********************");
  }
  @Test
  public void testDbn() throws Exception {
    Nd4j.MAX_SLICES_TO_PRINT = -1;
    Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
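    // Two-layer DBN config for Iris: a Gaussian-visible, rectified-hidden RBM (3 units)
    // followed by a classifier output layer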
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .iterations(100)
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .momentum(0.9)
            .optimizationAlgo(OptimizationAlgorithm.LBFGS)
            .constrainGradientToUnitNorm(true)
            .k(1)
            .regularization(true)
            .l2(2e-4)
            .visibleUnit(org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
            .hiddenUnit(org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.RECTIFIED)
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .nIn(4)
            .nOut(3)
            .list(2)
            .hiddenLayerSizes(3)
            .override(1, new ClassifierOverride(1))
            .build();

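    // Separately build a single sparse RBM layer (784 -> 600) and instantiate it via the layer factory with a score listener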
    NeuralNetConfiguration conf2 =
        new NeuralNetConfiguration.Builder()
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .nIn(784)
            .nOut(600)
            .applySparsity(true)
            .sparsity(0.1)
            .build();

    Layer l =
        LayerFactories.getFactory(conf2)
            .create(conf2, Arrays.<IterationListener>asList(new ScoreIterationListener(2)), 0);

    MultiLayerNetwork d = new MultiLayerNetwork(conf);

    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    DataSet next = iter.next();

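    // Write the raw feature matrix to "iris.txt" as tab-separated values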
    Nd4j.writeTxt(next.getFeatureMatrix(), "iris.txt", "\t");

    next.normalizeZeroMeanZeroUnitVariance();

    SplitTestAndTrain testAndTrain = next.splitTestAndTrain(110);
    DataSet train = testAndTrain.getTrain();

    d.fit(train);

    DataSet test = testAndTrain.getTest();

    Evaluation eval = new Evaluation();
    INDArray output = d.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }