Code Example #1
  @Test
  public void testDbnFaces() {
    DataSetIterator iter = new LFWDataSetIterator(28, 28);

    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();

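    // Four-layer network of RBMs with Gaussian visible and rectified hidden units
    // (hidden layer sizes 600, 250, 100); the final layer is overridden to be a classifier output layer.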
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .nIn(next.numInputs())
            .nOut(next.numOutcomes())
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .constrainGradientToUnitNorm(true)
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new NormalDistribution(0, 1e-5))
            .iterations(10)
            .learningRate(1e-3)
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .visibleUnit(RBM.VisibleUnit.GAUSSIAN)
            .hiddenUnit(RBM.HiddenUnit.RECTIFIED)
            .layer(new RBM())
            .list(4)
            .hiddenLayerSizes(600, 250, 100)
            .override(3, new ClassifierOverride())
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(
        Arrays.<IterationListener>asList(
            new ScoreIterationListener(10), new NeuralNetPlotterIterationListener(1)));
    network.fit(next);
  }
Code Example #2
  @Test
  public void testGradientWithAsList() {
    MultiLayerNetwork net1 = new MultiLayerNetwork(getConf());
    MultiLayerNetwork net2 = new MultiLayerNetwork(getConf());
    net1.init();
    net2.init();

    DataSet x1 = new IrisDataSetIterator(1, 150).next();
    DataSet all = new IrisDataSetIterator(150, 150).next();
    DataSet x2 = all.asList().get(0);

    // x1 and x2 contain identical data
    assertArrayEquals(asFloat(x1.getFeatureMatrix()), asFloat(x2.getFeatureMatrix()), 0.0f); // OK
    assertArrayEquals(asFloat(x1.getLabels()), asFloat(x2.getLabels()), 0.0f); // OK
    assertEquals(x1, x2); // Fails, DataSet doesn't override Object.equals()

    // Set inputs/outputs so gradient can be calculated:
    net1.feedForward(x1.getFeatureMatrix());
    net2.feedForward(x2.getFeatureMatrix());
    ((OutputLayer) net1.getLayers()[1]).setLabels(x1.getLabels());
    ((OutputLayer) net2.getLayers()[1]).setLabels(x2.getLabels());

    net1.gradient(); // OK
    net2.gradient(); // IllegalArgumentException: Buffers must fill up specified length 29
  }
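Example #2 relies on two helpers that are not shown above: getConf() and asFloat(INDArray). The following is a minimal sketch, assuming getConf() builds a small two-layer Iris network in the same builder style used by the other examples and asFloat() simply flattens an INDArray into a float[]; the names and settings here are illustrative assumptions, not the original test's code.

  // Hypothetical helper: a two-layer Iris configuration (4 inputs, 3 outputs, 3 hidden units)
  // with the second layer overridden to be a classifier output layer.
  private static MultiLayerConfiguration getConf() {
    return new NeuralNetConfiguration.Builder()
        .nIn(4)
        .nOut(3)
        .layer(new org.deeplearning4j.nn.conf.layers.RBM())
        .activationFunction("tanh")
        .list(2)
        .hiddenLayerSizes(3)
        .override(1, new ClassifierOverride(1))
        .build();
  }

  // Hypothetical helper: flatten an INDArray into a float[] for use with assertArrayEquals.
  private static float[] asFloat(INDArray arr) {
    float[] out = new float[arr.length()];
    for (int i = 0; i < out.length; i++) {
      out[i] = arr.getFloat(i);
    }
    return out;
  }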
Code Example #3
  @Test
  public void testGravesLSTMInit() {
    int nIn = 8;
    int nOut = 25;
    int nHiddenUnits = 17;
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .list(2)
            .layer(
                0,
                new org.deeplearning4j.nn.conf.layers.GravesLSTM.Builder()
                    .nIn(nIn)
                    .nOut(nHiddenUnits)
                    .weightInit(WeightInit.DISTRIBUTION)
                    .activation("tanh")
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS)
                    .nIn(nHiddenUnits)
                    .nOut(nOut)
                    .weightInit(WeightInit.DISTRIBUTION)
                    .activation("tanh")
                    .build())
            .build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    // Ensure that we have the correct number of weights and biases, and that these have the correct shapes, etc.
    Layer layer = network.getLayer(0);
    assertTrue(layer instanceof GravesLSTM);

    Map<String, INDArray> paramTable = layer.paramTable();
    assertTrue(paramTable.size() == 3); // 2 sets of weights, 1 set of biases

    INDArray recurrentWeights = paramTable.get(GravesLSTMParamInitializer.RECURRENT_WEIGHT_KEY);
    assertArrayEquals(
        recurrentWeights.shape(),
        new int[] {
          nHiddenUnits, 4 * nHiddenUnits + 3
        }); // Should be shape: [layerSize,4*layerSize+3]
    INDArray inputWeights = paramTable.get(GravesLSTMParamInitializer.INPUT_WEIGHT_KEY);
    assertArrayEquals(
        inputWeights.shape(),
        new int[] {nIn, 4 * nHiddenUnits}); // Should be shape: [nIn,4*layerSize]
    INDArray biases = paramTable.get(GravesLSTMParamInitializer.BIAS_KEY);
    assertArrayEquals(
        biases.shape(), new int[] {1, 4 * nHiddenUnits}); // Should be shape: [1,4*layerSize]

    // Want forget gate biases to be initialized to > 0. See parameter initializer for details
    INDArray forgetGateBiases =
        biases.get(new INDArrayIndex[] {NDArrayIndex.interval(nHiddenUnits, 2 * nHiddenUnits)});
    assertTrue(forgetGateBiases.gt(0).sum(Integer.MAX_VALUE).getDouble(0) == nHiddenUnits);

    int nParams = recurrentWeights.length() + inputWeights.length() + biases.length();
    assertTrue(nParams == layer.numParams());
  }
Code Example #4
  @Test
  public void testFeedForwardActivationsAndDerivatives() {
    MultiLayerNetwork network = new MultiLayerNetwork(getConf());
    network.init();
    DataSet data = new IrisDataSetIterator(1, 150).next();
    network.fit(data);
    // Activations and their derivatives should be reported layer-for-layer,
    // so the two lists must have the same size.
    Pair result = network.feedForwardActivationsAndDerivatives();
    List<INDArray> first = (List) result.getFirst();
    List<INDArray> second = (List) result.getSecond();
    assertEquals(first.size(), second.size());
  }
Code Example #5
  @Test
  public void testMultiLayerNinNout() {
    // [Nin,Nout]: [4,13], [13,3]
    int[] hiddenLayerSizes1 = {13};
    int[] expNin1 = {4, 13};
    int[] expNout1 = {13, 3};
    MultiLayerConfiguration conf1 = getNinNoutIrisConfig(hiddenLayerSizes1);
    MultiLayerNetwork net1 = new MultiLayerNetwork(conf1);
    net1.init();
    checkNinNoutForEachLayer(expNin1, expNout1, conf1, net1);

    int[] hiddenLayerSizes2 = {5, 7};
    int[] expNin2 = {4, 5, 7};
    int[] expNout2 = {5, 7, 3};
    MultiLayerConfiguration conf2 = getNinNoutIrisConfig(hiddenLayerSizes2);
    MultiLayerNetwork net2 = new MultiLayerNetwork(conf2);
    net2.init();
    checkNinNoutForEachLayer(expNin2, expNout2, conf2, net2);

    int[] hiddenLayerSizes3 = {5, 7, 9};
    int[] expNin3 = {4, 5, 7, 9};
    int[] expNout3 = {5, 7, 9, 3};
    MultiLayerConfiguration conf3 = getNinNoutIrisConfig(hiddenLayerSizes3);
    MultiLayerNetwork net3 = new MultiLayerNetwork(conf3);
    net3.init();
    checkNinNoutForEachLayer(expNin3, expNout3, conf3, net3);

    int[] hiddenLayerSizes4 = {5, 7, 9, 11, 13, 15, 17};
    int[] expNin4 = {4, 5, 7, 9, 11, 13, 15, 17};
    int[] expNout4 = {5, 7, 9, 11, 13, 15, 17, 3};
    MultiLayerConfiguration conf4 = getNinNoutIrisConfig(hiddenLayerSizes4);
    MultiLayerNetwork net4 = new MultiLayerNetwork(conf4);
    net4.init();
    checkNinNoutForEachLayer(expNin4, expNout4, conf4, net4);
  }
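Example #5 calls getNinNoutIrisConfig(...), a helper that is not shown here. A plausible sketch follows, assuming it wires a 4-input, 3-output Iris network around the given hidden layer sizes in the same builder style seen in the other examples; the exact settings are assumptions.

  // Hypothetical helper: Iris configuration with the given hidden layer sizes and a
  // classifier override on the final layer (values are assumptions for illustration).
  private static MultiLayerConfiguration getNinNoutIrisConfig(int[] hiddenLayerSizes) {
    int nLayers = hiddenLayerSizes.length + 1;
    return new NeuralNetConfiguration.Builder()
        .nIn(4)
        .nOut(3)
        .layer(new org.deeplearning4j.nn.conf.layers.RBM())
        .activationFunction("tanh")
        .list(nLayers)
        .hiddenLayerSizes(hiddenLayerSizes)
        .override(nLayers - 1, new ClassifierOverride())
        .build();
  }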
Code Example #6
  @Test
  public void testGRUInit() {
    int nIn = 8;
    int nOut = 25;
    int nHiddenUnits = 17;
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .list(2)
            .layer(
                0,
                new org.deeplearning4j.nn.conf.layers.GRU.Builder()
                    .nIn(nIn)
                    .nOut(nHiddenUnits)
                    .weightInit(WeightInit.DISTRIBUTION)
                    .build())
            .layer(
                1,
                new OutputLayer.Builder(LossFunctions.LossFunction.SQUARED_LOSS)
                    .nIn(nHiddenUnits)
                    .nOut(nOut)
                    .weightInit(WeightInit.DISTRIBUTION)
                    .build())
            .build();
    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();

    // Ensure that we have the correct number of weights and biases, and that these have the correct shapes, etc.
    Layer layer = network.getLayer(0);
    assertTrue(layer instanceof GRU);

    Map<String, INDArray> paramTable = layer.paramTable();
    assertTrue(paramTable.size() == 3); // 2 sets of weights, 1 set of biases

    INDArray recurrentWeights = paramTable.get(GRUParamInitializer.RECURRENT_WEIGHT_KEY);
    assertArrayEquals(
        recurrentWeights.shape(),
        new int[] {nHiddenUnits, 3 * nHiddenUnits}); // Should be shape: [layerSize,3*layerSize]
    INDArray inputWeights = paramTable.get(GRUParamInitializer.INPUT_WEIGHT_KEY);
    assertArrayEquals(
        inputWeights.shape(),
        new int[] {nIn, 3 * nHiddenUnits}); // Should be shape: [nIn,3*layerSize]
    INDArray biases = paramTable.get(GRUParamInitializer.BIAS_KEY);
    assertArrayEquals(
        biases.shape(), new int[] {1, 3 * nHiddenUnits}); // Should be shape: [1,3*layerSize]

    int nParams = recurrentWeights.length() + inputWeights.length() + biases.length();
    assertTrue(nParams == layer.numParams());
  }
Code Example #7
  @Test
  public void testBackProp() {
    Nd4j.getRandom().setSeed(123);
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .optimizationAlgo(OptimizationAlgorithm.CONJUGATE_GRADIENT)
            .iterations(10)
            .weightInit(WeightInit.XAVIER)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .nIn(4)
            .nOut(3)
            .layer(new org.deeplearning4j.nn.conf.layers.OutputLayer())
            .list(3)
            .backward(true)
            .pretrain(false)
            .hiddenLayerSizes(new int[] {3, 2})
            .override(
                2,
                new ConfOverride() {
                  @Override
                  public void overrideLayer(int i, NeuralNetConfiguration.Builder builder) {
                    builder.activationFunction("softmax");
                    builder.layer(new org.deeplearning4j.nn.conf.layers.OutputLayer());
                    builder.lossFunction(LossFunctions.LossFunction.MCXENT);
                  }
                })
            .build();

    MultiLayerNetwork network = new MultiLayerNetwork(conf);
    network.init();
    network.setListeners(Lists.<IterationListener>newArrayList(new ScoreIterationListener(1)));

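    // Normalize the Iris data, train on a 110-example split, and evaluate on the remaining 40.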
    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    DataSet next = iter.next();
    next.normalizeZeroMeanZeroUnitVariance();
    SplitTestAndTrain trainTest = next.splitTestAndTrain(110);
    network.setInput(trainTest.getTrain().getFeatureMatrix());
    network.setLabels(trainTest.getTrain().getLabels());
    network.init();
    network.fit(trainTest.getTrain());

    DataSet test = trainTest.getTest();
    Evaluation eval = new Evaluation();
    INDArray output = network.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }
Code Example #8
  @Test
  public void testSetParams() {
    Nd4j.MAX_ELEMENTS_PER_SLICE = Integer.MAX_VALUE;
    Nd4j.MAX_SLICES_TO_PRINT = Integer.MAX_VALUE;

    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .nIn(4)
            .nOut(3)
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .activationFunction("tanh")
            .list(2)
            .hiddenLayerSizes(3)
            .override(1, new ClassifierOverride(1))
            .build();

    MultiLayerNetwork network3 = new MultiLayerNetwork(conf);
    network3.init();

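    // Reading the parameter vector and writing it straight back should leave it unchanged.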
    INDArray params = network3.params();
    network3.setParameters(params);
    INDArray params4 = network3.params();
    assertEquals(params, params4);
  }
Code Example #9
  private static void checkNinNoutForEachLayer(
      int[] expNin, int[] expNout, MultiLayerConfiguration conf, MultiLayerNetwork network) {

    // Check configuration
    for (int i = 0; i < expNin.length; i++) {
      NeuralNetConfiguration layerConf = conf.getConf(i);
      assertTrue(layerConf.getNIn() == expNin[i]);
      assertTrue(layerConf.getNOut() == expNout[i]);
    }

    // Check Layer
    for (int i = 0; i < expNin.length; i++) {
      Layer layer = network.getLayers()[i];
      assertTrue(layer.conf().getNIn() == expNin[i]);
      assertTrue(layer.conf().getNOut() == expNout[i]);
      int[] weightShape = layer.getParam(DefaultParamInitializer.WEIGHT_KEY).shape();
      assertTrue(weightShape[0] == expNin[i]);
      assertTrue(weightShape[1] == expNout[i]);
    }
  }
Code Example #10
  @Test
  public void testDbn() throws Exception {
    Nd4j.MAX_SLICES_TO_PRINT = -1;
    Nd4j.MAX_ELEMENTS_PER_SLICE = -1;
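    // Two-layer DBN on Iris: a Gaussian-visible, rectified-hidden RBM (3 hidden units)
    // with a classifier override on the output layer.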
    MultiLayerConfiguration conf =
        new NeuralNetConfiguration.Builder()
            .iterations(100)
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .weightInit(WeightInit.DISTRIBUTION)
            .dist(new UniformDistribution(0, 1))
            .activationFunction("tanh")
            .momentum(0.9)
            .optimizationAlgo(OptimizationAlgorithm.LBFGS)
            .constrainGradientToUnitNorm(true)
            .k(1)
            .regularization(true)
            .l2(2e-4)
            .visibleUnit(org.deeplearning4j.nn.conf.layers.RBM.VisibleUnit.GAUSSIAN)
            .hiddenUnit(org.deeplearning4j.nn.conf.layers.RBM.HiddenUnit.RECTIFIED)
            .lossFunction(LossFunctions.LossFunction.RMSE_XENT)
            .nIn(4)
            .nOut(3)
            .list(2)
            .hiddenLayerSizes(3)
            .override(1, new ClassifierOverride(1))
            .build();

    NeuralNetConfiguration conf2 =
        new NeuralNetConfiguration.Builder()
            .layer(new org.deeplearning4j.nn.conf.layers.RBM())
            .nIn(784)
            .nOut(600)
            .applySparsity(true)
            .sparsity(0.1)
            .build();

    Layer l =
        LayerFactories.getFactory(conf2)
            .create(conf2, Arrays.<IterationListener>asList(new ScoreIterationListener(2)), 0);

    MultiLayerNetwork d = new MultiLayerNetwork(conf);

    DataSetIterator iter = new IrisDataSetIterator(150, 150);

    DataSet next = iter.next();

    Nd4j.writeTxt(next.getFeatureMatrix(), "iris.txt", "\t");

    next.normalizeZeroMeanZeroUnitVariance();

    SplitTestAndTrain testAndTrain = next.splitTestAndTrain(110);
    DataSet train = testAndTrain.getTrain();

    d.fit(train);

    DataSet test = testAndTrain.getTest();

    Evaluation eval = new Evaluation();
    INDArray output = d.output(test.getFeatureMatrix());
    eval.eval(test.getLabels(), output);
    log.info("Score " + eval.stats());
  }