// TODO remove/move, technically this is testing Nd4j functionality
@Test
public void testCreateFeatureMapMethod() {
    Layer layer = getContainedConfig();
    INDArray input = getContainedData();
    // Spatial width of the contained NCHW input. The original read shape()[0],
    // which is the batch dimension (1) and would make featureMapWidth wrong.
    int inputWidth = input.shape()[2];
    int featureMapWidth = (inputWidth + layer.conf().getPadding()[0] * 2
            - layer.conf().getKernelSize()[0]) / layer.conf().getStride()[0] + 1;

    INDArray expectedOutput = Nd4j.create(new double[] {
            1, 1, 1, 1, 3, 3, 3, 3, 1, 1, 1, 1, 3, 3, 3, 3,
            1, 1, 1, 1, 3, 3, 3, 3, 1, 1, 1, 1, 3, 3, 3, 3,
            2, 2, 2, 2, 4, 4, 4, 4, 2, 2, 2, 2, 4, 4, 4, 4,
            2, 2, 2, 2, 4, 4, 4, 4, 2, 2, 2, 2, 4, 4, 4, 4
    }, new int[] {1, 1, 2, 2, 4, 4});

    layer.setInput(input);
    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    INDArray featureMaps = layer2.createFeatureMapColumn();

    assertEquals(featureMapWidth, featureMaps.shape()[4]);
    // assertArrayEquals, not assertEquals: int[] instances are compared by
    // reference under assertEquals, so the original assertion could never pass.
    assertArrayEquals(expectedOutput.shape(), featureMaps.shape());
    assertEquals(expectedOutput, featureMaps);
}
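/*
 * Worked example of the featureMapWidth formula above, using values implied by
 * the contained fixtures elsewhere in this class (a 1x1x8x8 input, a 2x2 kernel,
 * stride 2, no padding -- inferred from the expected shapes, not stated
 * explicitly): featureMapWidth = (8 + 0 * 2 - 2) / 2 + 1 = 4, which matches
 * dimension 4 of the expected 1x1x2x2x4x4 col output.
 */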
// note: precision is off on this test but the numbers are close
// investigation in a future release should determine how to resolve
@Test
public void testCalculateDeltaContained() {
    Layer layer = getContainedConfig();
    INDArray input = getContainedData();
    INDArray col = getContainedCol();
    INDArray epsilon = Nd4j.ones(1, 2, 4, 4);

    INDArray expectedOutput = Nd4j.create(new double[] {
            0.02036651, 0.02036651, 0.02036651, 0.02036651,
            0.00039383, 0.00039383, 0.00039383, 0.00039383,
            0.02036651, 0.02036651, 0.02036651, 0.02036651,
            0.00039383, 0.00039383, 0.00039383, 0.00039383,
            0.02036651, 0.02036651, 0.02036651, 0.02036651,
            0.00039383, 0.00039383, 0.00039383, 0.00039383,
            0.02036651, 0.02036651, 0.02036651, 0.02036651,
            0.00039383, 0.00039383, 0.00039383, 0.00039383
    }, new int[] {1, 2, 4, 4});

    layer.setInput(input);
    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    layer2.setCol(col);
    INDArray delta = layer2.calculateDelta(epsilon);

    assertArrayEquals(expectedOutput.shape(), delta.shape());
    assertEquals(expectedOutput, delta);
}
// note: precision is off on this test but the numbers are close
// investigation in a future release should determine how to resolve
@Test
public void testBackpropResultsContained() {
    Layer layer = getContainedConfig();
    INDArray input = getContainedData();
    INDArray col = getContainedCol();
    INDArray epsilon = Nd4j.ones(1, 2, 4, 4);

    INDArray expectedBiasGradient =
            Nd4j.create(new double[] {0.16608272, 0.16608272}, new int[] {1, 2});
    INDArray expectedWeightGradient = Nd4j.create(new double[] {
            0.17238397, 0.17238397, 0.33846668, 0.33846668,
            0.17238397, 0.17238397, 0.33846668, 0.33846668
    }, new int[] {2, 1, 2, 2});
    INDArray expectedEpsilon = Nd4j.create(new double[] {
            0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0., 0.,
            0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0., 0.,
            0.02036651, 0.02036651, 0.02036651, 0.02036651, 0.02036651, 0.02036651, 0., 0.,
            0.02036651, 0.02036651, 0.02036651, 0.02036651, 0.02036651, 0.02036651, 0., 0.,
            0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0., 0.,
            0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0.00039383, 0., 0.,
            0., 0., 0., 0., 0., 0., 0., 0.,
            0., 0., 0., 0., 0., 0., 0., 0.
    }, new int[] {1, 1, 8, 8});

    layer.setInput(input);
    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    layer2.setCol(col);
    Pair<Gradient, INDArray> pair = layer2.backpropGradient(epsilon);

    assertArrayEquals(expectedEpsilon.shape(), pair.getSecond().shape());
    assertArrayEquals(expectedWeightGradient.shape(), pair.getFirst().getGradientFor("W").shape());
    assertArrayEquals(expectedBiasGradient.shape(), pair.getFirst().getGradientFor("b").shape());
    assertEquals(expectedEpsilon, pair.getSecond());
    assertEquals(expectedWeightGradient, pair.getFirst().getGradientFor("W"));
    assertEquals(expectedBiasGradient, pair.getFirst().getGradientFor("b"));
}
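/*
 * Shape note: the weight gradient above follows DL4J's [nOut, nIn, kH, kW]
 * convolution-weight layout (here {2, 1, 2, 2}), and the returned epsilon
 * matches the 1x1x8x8 contained input.
 */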
@Test
public void testPreOutputMethodContained() {
    Layer layer = getContainedConfig();
    INDArray col = getContainedCol();

    INDArray expectedOutput = Nd4j.create(new double[] {
            4., 4., 4., 4., 8., 8., 8., 8.,
            4., 4., 4., 4., 8., 8., 8., 8.,
            4., 4., 4., 4., 8., 8., 8., 8.,
            4., 4., 4., 4., 8., 8., 8., 8.
    }, new int[] {1, 2, 4, 4});

    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    layer2.setCol(col);
    INDArray activation = layer2.preOutput(true);

    assertArrayEquals(expectedOutput.shape(), activation.shape());
    assertEquals(expectedOutput, activation);
}
@Test
public void testCalculateDelta() {
    Layer layer = getContainedConfig();
    INDArray col = getContainedCol();
    // NOTE: epsilon was used but never declared in the original method. Its
    // shape must match the layer output (1, 2, 4, 4); Nd4j.ones is a
    // placeholder -- the expected values below presuppose the original,
    // unrecoverable epsilon.
    INDArray epsilon = Nd4j.ones(1, 2, 4, 4);

    INDArray expectedOutput = Nd4j.create(new double[] {
            -12., -12., -12., -12., -56., -56., -56., -56.,
            -12., -12., -12., -12., -56., -56., -56., -56.,
            -12., -12., -12., -12., -56., -56., -56., -56.,
            -12., -12., -12., -12., -56., -56., -56., -56.
    }, new int[] {1, 2, 4, 4});

    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    layer2.setCol(col);
    INDArray delta = layer2.calculateDelta(epsilon);

    // assertArrayEquals: int[] shapes compared with assertEquals would fail
    // on reference equality.
    assertArrayEquals(expectedOutput.shape(), delta.shape());
    assertEquals(expectedOutput, delta);
}
@Test
public void testBackpropResults() {
    Layer layer = getContainedConfig();
    INDArray col = getContainedCol();
    // NOTE: epsilon was used but never declared in the original method; as in
    // testCalculateDelta, Nd4j.ones with the layer's output shape is a placeholder.
    INDArray epsilon = Nd4j.ones(1, 2, 4, 4);

    INDArray expectedWeightGradient = Nd4j.create(new double[] {
            -1440., -1440., -1984., -1984., -1440., -1440., -1984., -1984.
    }, new int[] {2, 1, 2, 2});
    INDArray expectedBiasGradient = Nd4j.create(new double[] {-544., -544.}, new int[] {2});
    INDArray expectedEpsilon = Nd4j.create(new double[] {
            -12., -12., -12., -12., -12., -12., -12., -12.,
            -12., -12., -12., -12., -12., -12., -12., -12.,
            -56., -56., -56., -56., -56., -56., -56., -56.,
            -56., -56., -56., -56., -56., -56., -56., -56.,
            -12., -12., -12., -12., -12., -12., -12., -12.,
            -12., -12., -12., -12., -12., -12., -12., -12.,
            -56., -56., -56., -56., -56., -56., -56., -56.,
            -56., -56., -56., -56., -56., -56., -56., -56.
    }, new int[] {1, 1, 8, 8});

    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer layer2 =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) layer;
    layer2.setCol(col);
    Pair<Gradient, INDArray> pair = layer2.backpropGradient(epsilon);

    // assertArrayEquals: int[] shapes compared with assertEquals would fail
    // on reference equality.
    assertArrayEquals(expectedEpsilon.shape(), pair.getSecond().shape());
    assertArrayEquals(expectedWeightGradient.shape(), pair.getFirst().getGradientFor("W").shape());
    assertArrayEquals(expectedBiasGradient.shape(), pair.getFirst().getGradientFor("b").shape());
    assertEquals(expectedEpsilon, pair.getSecond());
    assertEquals(expectedWeightGradient, pair.getFirst().getGradientFor("W"));
    assertEquals(expectedBiasGradient, pair.getFirst().getGradientFor("b"));
}
public static void testAccuracy() {
    // Fixed 1x1x3x3 input so the convolution output can be checked by hand.
    double[][][][] data = {{{
            {1.0, 2.0, 3.0},
            {4.0, 5.0, 6.0},
            {7.0, 8.0, 9.0}
    }}};
    double[] flat = ArrayUtil.flattenDoubleArray(data);
    int[] shape = {1, 1, 3, 3};
    INDArray input = Nd4j.create(flat, shape, 'c');

    TestCase testCase = new TestCase(1, 1, 2, 2, 1, 1, 0, 0, 3, 3);
    // Renamed from convolutionLayerBuilder: build() has already been called,
    // so this is the layer configuration, not a builder.
    ConvolutionLayer convolutionLayerConf =
            new ConvolutionLayer.Builder(testCase.kW, testCase.kH)
                    .nIn(testCase.nInputPlane)
                    .stride(testCase.dW, testCase.dH)
                    .padding(testCase.padW, testCase.padH)
                    .nOut(testCase.nOutputPlane)
                    .build();
    MultiLayerConfiguration.Builder builder =
            new NeuralNetConfiguration.Builder().list().layer(0, convolutionLayerConf);
    MultiLayerConfiguration conf = builder.build();

    MultiLayerNetwork model = new MultiLayerNetwork(conf);
    model.init();
    model.setInput(input);
    model.getLayer(0).setInput(input);

    org.deeplearning4j.nn.layers.convolution.ConvolutionLayer convolutionLayer =
            (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) model.getLayer(0);
    System.out.println(convolutionLayer.params());
    System.out.println(convolutionLayer.preOutput(false));
}
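/*
 * For the TestCase above (assuming its constructor arguments line up with the
 * builder calls: 2x2 kernel, stride 1, no padding, 3x3 input), the printed
 * preOutput should have spatial size (3 - 2) / 1 + 1 = 2 per side, i.e.
 * shape [1, 1, 2, 2].
 */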
public static void testForward() {
    for (TestCase testCase : allTestCases) {
        try (BufferedWriter writer =
                new BufferedWriter(new FileWriter(new File("dl4jPerformance.csv"), true))) {
            ConvolutionLayer convolutionLayerConf =
                    new ConvolutionLayer.Builder(testCase.kW, testCase.kH)
                            .nIn(testCase.nInputPlane)
                            .stride(testCase.dW, testCase.dH)
                            .padding(testCase.padW, testCase.padH)
                            .nOut(testCase.nOutputPlane)
                            .build();
            MultiLayerConfiguration.Builder builder =
                    new NeuralNetConfiguration.Builder().list().layer(0, convolutionLayerConf);
            MultiLayerConfiguration conf = builder.build();

            MultiLayerNetwork model = new MultiLayerNetwork(conf);
            model.init();
            INDArray input = Nd4j.rand(
                    seed, batchSize, testCase.nInputPlane, testCase.inputWidth, testCase.inputHeight);
            model.setInput(input);
            model.getLayer(0).setInput(input);
            org.deeplearning4j.nn.layers.convolution.ConvolutionLayer convolutionLayer =
                    (org.deeplearning4j.nn.layers.convolution.ConvolutionLayer) model.getLayer(0);

            // Time forwardIterations calls to preOutput and record the mean
            // per-call time in milliseconds (nanoTime returns long, not double).
            long start = System.nanoTime();
            for (int i = 0; i < forwardIterations; i++) {
                convolutionLayer.preOutput(false);
            }
            long end = System.nanoTime();
            double timeMillis = (end - start) / 1e6 / forwardIterations;

            writer.write("Convolution(" + testCase.nInputPlane + " " + testCase.nOutputPlane
                    + " " + testCase.kW + " " + testCase.kH
                    + " " + testCase.dW + " " + testCase.dH
                    + " " + testCase.padW + " " + testCase.padH
                    + " " + testCase.inputWidth + " " + testCase.inputHeight
                    + ") forward, " + timeMillis + "\n");
        } catch (Exception ex) {
            ex.printStackTrace();
        }
    }
}
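/*
 * Hypothetical driver, not part of the original file: a minimal sketch of how
 * these static benchmark methods might be invoked, assuming the class's static
 * fields (seed, batchSize, forwardIterations, allTestCases) are initialized
 * elsewhere in this class.
 */
public static void main(String[] args) {
    testAccuracy();
    testForward();
}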