Code example #1
  @Test
  public void testMmul() {
    IComplexNDArray n = Nd4j.createComplex(Nd4j.linspace(1, 10, 10));
    IComplexNDArray transposed = n.transpose();
    assertEquals(true, n.isRowVector());
    assertEquals(true, transposed.isColumnVector());

    // Inner product of [1..10] with itself: 1^2 + 2^2 + ... + 10^2 = 385
    INDArray innerProduct = n.mmul(transposed);

    INDArray scalar = Nd4j.scalar(Nd4j.createComplexNumber(385, 0));
    assertEquals(getFailureMessage(), scalar, innerProduct);

    // Outer product: 10 x 1 column times 1 x 10 row gives a 10 x 10 matrix
    INDArray outerProduct = transposed.mmul(n);
    assertEquals(true, Shape.shapeEquals(new int[] {10, 10}, outerProduct.shape()));

    IComplexNDArray d3 =
        Nd4j.createComplex(ComplexUtil.complexNumbersFor(new double[] {1, 2})).reshape(2, 1);
    IComplexNDArray d4 = Nd4j.createComplex(ComplexUtil.complexNumbersFor(new double[] {3, 4}));
    INDArray resultNDArray = d3.mmul(d4);
    INDArray result =
        Nd4j.createComplex(
            new IComplexNumber[][] {
              {Nd4j.createComplexNumber(3, 0), Nd4j.createComplexNumber(4, 0)},
              {Nd4j.createComplexNumber(6, 0), Nd4j.createComplexNumber(8, 0)}
            });

    assertEquals(getFailureMessage(), result, resultNDArray);
  }
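For a row vector n, n.mmul(transposed) is the inner product (a 1 x 1 result) and transposed.mmul(n) the outer product. A minimal real-valued sketch of the same shape behaviour, assuming the same Nd4j linspace/mmul API used above:

  INDArray v = Nd4j.linspace(1, 10, 10);   // 1 x 10 row vector
  INDArray inner = v.mmul(v.transpose());  // 1 x 1: 1^2 + 2^2 + ... + 10^2 = 385
  INDArray outer = v.transpose().mmul(v);  // 10 x 10 rank-1 matrix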
Code example #2
  @Test
  public void testWrap() {
    IComplexNDArray c = Nd4j.createComplex(Nd4j.linspace(1, 4, 4).reshape(2, 2));
    assertEquals(true, Arrays.equals(new int[] {2, 2}, c.shape()));

    IComplexNDArray vec = Nd4j.createComplex(Nd4j.linspace(1, 4, 4));
    assertEquals(true, vec.isVector());
    assertEquals(true, Shape.shapeEquals(new int[] {4}, vec.shape()));
  }
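createComplex here lifts a real array into a complex one with zero imaginary parts, preserving the shape; the assertions only check that the shape survives the wrap. A real-valued sketch of the same checks, using only calls that already appear above:

  INDArray m = Nd4j.linspace(1, 4, 4).reshape(2, 2);
  assertTrue(Arrays.equals(new int[] {2, 2}, m.shape())); // 2 x 2 matrix
  assertTrue(Nd4j.linspace(1, 4, 4).isVector());          // length-4 vector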
Code example #3
File: CpuLevel3.java Project: KillEdision/nd4j
  @Override
  protected void sgemm(
      char Order,
      char TransA,
      char TransB,
      int M,
      int N,
      int K,
      float alpha,
      INDArray A,
      int lda,
      INDArray B,
      int ldb,
      float beta,
      INDArray C,
      int ldc) {
    // Strip any view offsets so the raw data buffers start at element zero
    A = Shape.toOffsetZero(A);
    B = Shape.toOffsetZero(B);

    DataBuffer aData = A.data();
    DataBuffer bData = B.data();

    float[] cData = getFloatData(C);
    BLAS.getInstance()
        .sgemm(
            String.valueOf(TransA),
            String.valueOf(TransB),
            M,
            N,
            K,
            alpha,
            aData.asFloat(),
            getBlasOffset(A),
            lda,
            bData.asFloat(),
            getBlasOffset(B),
            ldb,
            beta,
            cData,
            getBlasOffset(C),
            ldc);
    // Write the updated float buffer back into C
    setData(cData, C);
  }
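sgemm implements the single-precision BLAS level-3 contract C = alpha * op(A) * op(B) + beta * C, where op(...) is transposition controlled by TransA/TransB. A hedged sketch of the alpha = 1, beta = 0 case through the high-level API (A and B here are placeholder matrices, not values from the code above):

  INDArray A = Nd4j.linspace(1, 6, 6).reshape(2, 3);
  INDArray B = Nd4j.linspace(1, 6, 6).reshape(3, 2);
  INDArray C = A.mmul(B); // 2 x 2; matrix-matrix mmul is backed by gemm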
Code example #4
File: CpuLevel3.java Project: KillEdision/nd4j
  @Override
  protected void zgemm(
      char Order,
      char TransA,
      char TransB,
      int M,
      int N,
      int K,
      IComplexDouble alpha,
      IComplexNDArray A,
      int lda,
      IComplexNDArray B,
      int ldb,
      IComplexDouble beta,
      IComplexNDArray C,
      int ldc) {
    // Strip any view offsets so the raw data buffers start at element zero
    A = (IComplexNDArray) Shape.toOffsetZero(A);
    B = (IComplexNDArray) Shape.toOffsetZero(B);
    C = (IComplexNDArray) Shape.toOffsetZero(C);

    double[] cData = getDoubleData(C);
    NativeBlas.zgemm(
        TransA,
        TransB,
        M,
        N,
        K,
        CpuComplex.getComplexDouble(alpha),
        getDoubleData(A),
        getBlasOffset(A),
        lda,
        getDoubleData(B),
        getBlasOffset(B),
        ldb,
        CpuComplex.getComplexDouble(beta),
        cData,
        getBlasOffset(C),
        ldc);
    setData(cData, C);
  }
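zgemm is the complex double-precision counterpart: alpha and beta are complex scalars, and getDoubleData hands NativeBlas flat double[] buffers. A small illustration of the interleaved layout such buffers are assumed to use (re/im pairs, shown with made-up values):

  // Complex vector (1+2i, 3+4i) in interleaved storage:
  double[] packed = new double[] {1, 2, 3, 4}; // [re0, im0, re1, im1]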
Code example #5
File: AdaGrad.java Project: smarthi/nd4j
 @Override
 public void setStateViewArray(
     INDArray viewArray, int[] gradientShape, char gradientOrder, boolean initialize) {
   if (!viewArray.isRowVector())
     throw new IllegalArgumentException("Invalid input: expect row vector input");
   if (initialize) viewArray.assign(epsilon);
   this.historicalGradient = viewArray;
   // Reshape to match the expected shape of the input gradient arrays
   this.historicalGradient =
       Shape.newShapeNoCopy(this.historicalGradient, gradientShape, gradientOrder == 'f');
   if (historicalGradient == null)
     throw new IllegalStateException("Could not correctly reshape gradient view array");
 }
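The row-vector view holds AdaGrad's accumulated squared gradients, reshaped in place (no copy) to match the gradient arrays. A sketch of the update this state supports, assuming the textbook AdaGrad rule rather than this class's exact code (gradient and learningRate are placeholders; Transforms is org.nd4j.linalg.ops.transforms.Transforms):

  historicalGradient.addi(gradient.mul(gradient));             // h += g^2, element-wise
  INDArray update = gradient.mul(learningRate)
      .div(Transforms.sqrt(historicalGradient).add(epsilon));  // lr * g / (sqrt(h) + eps)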
Code example #6
  @Override
  public INDArray backprop(INDArray output, int miniBatchSize) {
    // In a few cases output may be null, and this is valid: e.g. time series data -> embedding layer
    if (output == null) return null;
    // Need to reshape FeedForward layer epsilons (2d) to 3d (for use in RNN layer backprop calculations)
    if (output.rank() != 2)
      throw new IllegalArgumentException(
          "Invalid input: expect NDArray with rank 2 (i.e., epsilons from feed forward layer)");
    if (output.ordering() == 'c') output = Shape.toOffsetZeroCopy(output, 'f');

    int[] shape = output.shape();
    INDArray reshaped = output.reshape('f', miniBatchSize, shape[0] / miniBatchSize, shape[1]);
    return reshaped.permute(0, 2, 1);
  }
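The reshape/permute pair turns 2d epsilons of shape [miniBatchSize * timeSeriesLength, layerSize] into the [miniBatchSize, layerSize, timeSeriesLength] layout RNN layers expect. A concrete sketch with made-up sizes (miniBatchSize = 3, timeSeriesLength = 2, layerSize = 4):

  INDArray eps2d = Nd4j.linspace(1, 24, 24).reshape('f', 6, 4);  // [6, 4]
  INDArray eps3d = eps2d.reshape('f', 3, 2, 4).permute(0, 2, 1); // [3, 4, 2]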
Code example #7
File: AdaGrad.java Project: smarthi/nd4j
  public AdaGrad createSubset(int index) {
    if (historicalGradient == null) this.historicalGradient = Nd4j.ones(shape);

    if (Shape.isMatrix(shape)) {
      AdaGrad a = new AdaGrad(1, historicalGradient.columns());
      // grab only the needed elements
      INDArray slice = historicalGradient.slice(index).dup();
      a.historicalGradient = slice;
      a.setLearningRate(learningRate);
      return a;
    } else {
      AdaGrad a = new AdaGrad(1, 1);
      // grab only the needed elements
      INDArray slice = Nd4j.scalar(historicalGradient.getDouble(index));
      a.historicalGradient = slice;
      a.setLearningRate(learningRate);
      return a;
    }
  }
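createSubset peels one row (or one element) out of the accumulated state so a single parameter row can be stepped by its own AdaGrad instance; dup() detaches the slice from the parent buffer. A sketch of the slice semantics with placeholder data:

  INDArray h = Nd4j.linspace(1, 6, 6).reshape(2, 3);
  INDArray row = h.slice(1).dup(); // detached copy of row 1: [4.0, 5.0, 6.0]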
Code example #8
File: BaseShapeInfoProvider.java Project: Zixxy/nd4j
 @Override
 public DataBuffer createShapeInformation(
     int[] shape, int[] stride, int offset, int elementWiseStride, char order) {
   return Shape.createShapeInformation(shape, stride, offset, elementWiseStride, order);
 }
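The provider packs shape, stride, offset, element-wise stride, and ordering into one DataBuffer, which is how nd4j keeps shape metadata separate from array data. A hedged usage sketch (illustrative values; assumes an nd4j version where Nd4j exposes the provider):

  // A 2 x 3 c-ordered array: strides {3, 1}, offset 0, element-wise stride 1
  DataBuffer info = Nd4j.getShapeInfoProvider()
      .createShapeInformation(new int[] {2, 3}, new int[] {3, 1}, 0, 1, 'c');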
Code example #9
  @Override
  public Pair<Gradient, INDArray> backpropGradient(
      INDArray input,
      INDArray weights,
      INDArray delta,
      int[] kernel,
      int[] strides,
      int[] pad,
      INDArray biasGradView,
      INDArray weightGradView,
      String afn) {
    int miniBatch = input.size(0);
    int inH = input.size(2);
    int inW = input.size(3);

    int outDepth = weights.size(0);
    int inDepth = weights.size(1);
    int kH = weights.size(2);
    int kW = weights.size(3);

    int outH = Convolution.outSize(inH, kernel[0], strides[0], pad[0], false);
    int outW = Convolution.outSize(inW, kernel[1], strides[1], pad[1], false);

    if (!Shape.strideDescendingCAscendingF(delta)) {
      // apparently not supported by cuDNN
      delta = delta.dup();
    }

    int[] srcStride = input.stride();
    int[] deltaStride = delta.stride();
    int[] algo = new int[1];

    // Describe the input and delta tensors, the convolution, and the filter to cuDNN
    checkCudnn(
        cudnnSetTensor4dDescriptorEx(
            cudnnContext.srcTensorDesc,
            dataType,
            miniBatch,
            inDepth,
            inH,
            inW,
            srcStride[0],
            srcStride[1],
            srcStride[2],
            srcStride[3]));
    checkCudnn(
        cudnnSetTensor4dDescriptorEx(
            cudnnContext.deltaTensorDesc,
            dataType,
            miniBatch,
            outDepth,
            outH,
            outW,
            deltaStride[0],
            deltaStride[1],
            deltaStride[2],
            deltaStride[3]));
    checkCudnn(
        cudnnSetConvolution2dDescriptor(
            cudnnContext.convDesc,
            pad[0],
            pad[1],
            strides[0],
            strides[1],
            1,
            1,
            CUDNN_CROSS_CORRELATION));
    checkCudnn(
        cudnnSetFilter4dDescriptor(
            cudnnContext.filterDesc, dataType, tensorFormat, outDepth, inDepth, kH, kW));
    checkCudnn(
        cudnnGetConvolutionBackwardFilterAlgorithm(
            cudnnContext,
            cudnnContext.srcTensorDesc,
            cudnnContext.deltaTensorDesc,
            cudnnContext.convDesc,
            cudnnContext.filterDesc,
            CUDNN_CONVOLUTION_BWD_FILTER_PREFER_FASTEST,
            0,
            algo));

    // Epsilon (gradient w.r.t. the input) for the layer below, allocated in c order
    INDArray epsNext = Nd4j.create(new int[] {miniBatch, inDepth, inH, inW}, 'c');
    int[] dstStride = epsNext.stride();

    // Prepare all involved arrays for a GPU action and fetch their device pointers
    Allocator allocator = AtomicAllocator.getInstance();
    CudaContext context =
        allocator
            .getFlowController()
            .prepareAction(input, weights, weightGradView, biasGradView, delta, epsNext);
    Pointer srcData = allocator.getPointer(input, context);
    Pointer filterData = allocator.getPointer(weights, context);
    Pointer filterGradData = allocator.getPointer(weightGradView, context);
    Pointer biasGradData = allocator.getPointer(biasGradView, context);
    Pointer deltaData = allocator.getPointer(delta, context);
    Pointer dstData = allocator.getPointer(epsNext, context);

    checkCudnn(cudnnSetStream(cudnnContext, new CUstream_st(context.getOldStream())));
    checkCudnn(
        cudnnSetTensor4dDescriptorEx(
            cudnnContext.dstTensorDesc,
            dataType,
            miniBatch,
            inDepth,
            inH,
            inW,
            dstStride[0],
            dstStride[1],
            dstStride[2],
            dstStride[3]));
    // Query workspace sizes for the backward-filter and backward-data passes,
    // growing the shared workspace if either needs more room
    checkCudnn(
        cudnnGetConvolutionBackwardFilterWorkspaceSize(
            cudnnContext,
            cudnnContext.srcTensorDesc,
            cudnnContext.deltaTensorDesc,
            cudnnContext.convDesc,
            cudnnContext.filterDesc,
            algo[0],
            sizeInBytes));
    long sizeInBytes1 = sizeInBytes.get(0);
    checkCudnn(
        cudnnGetConvolutionBackwardDataWorkspaceSize(
            cudnnContext,
            cudnnContext.filterDesc,
            cudnnContext.deltaTensorDesc,
            cudnnContext.convDesc,
            cudnnContext.dstTensorDesc,
            algo[0],
            sizeInBytes));
    long sizeInBytes2 = sizeInBytes.get(0);
    if (sizeInBytes1 > workSpace.capacity() || sizeInBytes2 > workSpace.capacity()) {
      workSpace.deallocate();
      workSpace = new WorkSpace(Math.max(sizeInBytes1, sizeInBytes2));
    }

    // Bias gradient: reduce delta over the batch and spatial dimensions
    checkCudnn(
        cudnnSetTensor4dDescriptor(
            cudnnContext.biasTensorDesc, tensorFormat, dataType, 1, outDepth, 1, 1));
    checkCudnn(
        cudnnConvolutionBackwardBias(
            cudnnContext,
            alpha,
            cudnnContext.deltaTensorDesc,
            deltaData,
            beta,
            cudnnContext.biasTensorDesc,
            biasGradData));
    // Weight gradient, written into filterGradData (the weightGradView buffer)
    checkCudnn(
        cudnnConvolutionBackwardFilter(
            cudnnContext,
            alpha,
            cudnnContext.srcTensorDesc,
            srcData,
            cudnnContext.deltaTensorDesc,
            deltaData,
            cudnnContext.convDesc,
            algo[0],
            workSpace,
            workSpace.capacity(),
            beta,
            cudnnContext.filterDesc,
            filterGradData));
    // Gradient w.r.t. the input (epsilon for the layer below), written into epsNext
    checkCudnn(
        cudnnConvolutionBackwardData(
            cudnnContext,
            alpha,
            cudnnContext.filterDesc,
            filterData,
            cudnnContext.deltaTensorDesc,
            deltaData,
            cudnnContext.convDesc,
            algo[0],
            workSpace,
            workSpace.capacity(),
            beta,
            cudnnContext.dstTensorDesc,
            dstData));

    allocator.registerAction(context, input, weights, weightGradView, biasGradView, delta, epsNext);

    Gradient retGradient = new DefaultGradient();
    retGradient.setGradientFor(ConvolutionParamInitializer.BIAS_KEY, biasGradView);
    retGradient.setGradientFor(ConvolutionParamInitializer.WEIGHT_KEY, weightGradView, 'c');

    return new Pair<>(retGradient, epsNext);
  }
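The method chains three cuDNN backward passes: backward-bias fills biasGradView, backward-filter fills weightGradView, and backward-data produces epsNext for the layer below. The NCHW shapes involved, plus the no-dilation output-size formula that Convolution.outSize is assumed to compute for the same = false case used above:

  // input:   [miniBatch, inDepth,  inH,  inW ]
  // weights: [outDepth,  inDepth,  kH,   kW  ]
  // delta:   [miniBatch, outDepth, outH, outW]
  // epsNext: [miniBatch, inDepth,  inH,  inW ]
  int outH = (inH - kernel[0] + 2 * pad[0]) / strides[0] + 1;
  int outW = (inW - kernel[1] + 2 * pad[1]) / strides[1] + 1;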