Example #1
  /**
   * Discrete Fourier transform 2d
   *
   * @param input the input to transform
   * @param rows the number of rows in the transformed output matrix
   * @param cols the number of columns in the transformed output matrix
   * @return the discrete Fourier transform of the input
   */
  public static ComplexFloatMatrix complexDisceteFourierTransform(
      FloatMatrix input, int rows, int cols) {
    ComplexFloatMatrix base;

    // pad
    if (input.rows < rows || input.columns < cols)
      base = MatrixUtil.complexPadWithZeros(input, rows, cols);
    // truncation
    else if (input.rows > rows || input.columns > cols) {
      base = new ComplexFloatMatrix(input);
      base =
          base.get(
              MatrixUtil.toIndices(RangeUtils.interval(0, rows)),
              MatrixUtil.toIndices(RangeUtils.interval(0, cols)));
    } else base = new ComplexFloatMatrix(input);

    ComplexFloatMatrix temp = new ComplexFloatMatrix(base.rows, base.columns);
    ComplexFloatMatrix ret = new ComplexFloatMatrix(base.rows, base.columns);
    for (int i = 0; i < base.columns; i++) {
      ComplexFloatMatrix column = base.getColumn(i);
      temp.putColumn(i, complexDiscreteFourierTransform1d(column));
    }

    for (int i = 0; i < ret.rows; i++) {
      ComplexFloatMatrix row = temp.getRow(i);
      ret.putRow(i, complexDiscreteFourierTransform1d(row));
    }
    return ret;
  }
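For intuition, here is a minimal, self-contained sketch (plain double arrays instead of jblas types) of the idea the method above implements: a naive 1d DFT helper applied first to every column and then to every row, which is how the 2d transform is assembled. The helper names naiveDft1d and dft2d are illustrative only, not part of the library.

  // Illustrative sketch of the column-then-row 2d DFT used above; not library code.
  static void naiveDft1d(double[] re, double[] im) {
    int n = re.length;
    double[] outRe = new double[n];
    double[] outIm = new double[n];
    for (int k = 0; k < n; k++) {
      for (int j = 0; j < n; j++) {
        double angle = -2.0 * Math.PI * k * j / n; // forward transform uses exp(-2*pi*i*k*j/n)
        outRe[k] += re[j] * Math.cos(angle) - im[j] * Math.sin(angle);
        outIm[k] += re[j] * Math.sin(angle) + im[j] * Math.cos(angle);
      }
    }
    System.arraycopy(outRe, 0, re, 0, n);
    System.arraycopy(outIm, 0, im, 0, n);
  }

  static void dft2d(double[][] re, double[][] im) {
    int rows = re.length, cols = re[0].length;
    // transform every column, then every row (separability of the 2d DFT)
    for (int c = 0; c < cols; c++) {
      double[] colRe = new double[rows], colIm = new double[rows];
      for (int r = 0; r < rows; r++) { colRe[r] = re[r][c]; colIm[r] = im[r][c]; }
      naiveDft1d(colRe, colIm);
      for (int r = 0; r < rows; r++) { re[r][c] = colRe[r]; im[r][c] = colIm[r]; }
    }
    for (int r = 0; r < rows; r++) {
      naiveDft1d(re[r], im[r]);
    }
  }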
Example #2
  /**
   * 1d inverse discrete Fourier transform. See MATLAB's fft2 for more examples. Note that this
   * will throw an exception if the input isn't a vector.
   *
   * @param inputC the input to transform
   * @return the inverse Fourier transform of the passed-in input
   */
  public static ComplexNDArray complexInverseDisceteFourierTransform1d(ComplexNDArray inputC) {
    if (inputC.shape().length != 1)
      throw new IllegalArgumentException("Illegal input: Must be a vector");
    double len = MatrixUtil.length(inputC);
    ComplexDouble c2 = new ComplexDouble(0, -2).muli(FastMath.PI).divi(len);
    ComplexDoubleMatrix range = MatrixUtil.complexRangeVector(0, len);
    ComplexDoubleMatrix div2 = range.transpose().mul(c2);
    ComplexDoubleMatrix div3 = range.mmul(div2).negi();
    ComplexDoubleMatrix matrix = exp(div3).div(len);
    ComplexDoubleMatrix complexRet = matrix.mmul(inputC);

    return ComplexNDArray.wrap(inputC, complexRet);
  }
Example #3
  /**
   * 1d inverse discrete Fourier transform. See MATLAB's fft2 for more examples. Note that this
   * will throw an exception if the input isn't a vector.
   *
   * @param inputC the input to transform
   * @return the inverse Fourier transform of the passed-in input
   */
  public static ComplexFloatMatrix complexInverseDisceteFourierTransform1d(
      ComplexFloatMatrix inputC) {
    if (inputC.rows != 1 && inputC.columns != 1)
      throw new IllegalArgumentException("Illegal input: Must be a vector");
    float len = MatrixUtil.length(inputC);
    ComplexFloat c2 = new ComplexFloat(0, -2).muli((float) FastMath.PI).divi(len);
    ComplexFloatMatrix range = MatrixUtil.complexRangeVector(0, (int) len);
    ComplexFloatMatrix div2 = range.transpose().mul(c2);
    ComplexFloatMatrix div3 = range.mmul(div2).negi();
    ComplexFloatMatrix matrix = exp(div3).div(len);
    ComplexFloatMatrix complexRet =
        inputC.isRowVector() ? matrix.mmul(inputC) : inputC.mmul(matrix);

    return complexRet;
  }
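A hedged sketch of what the inverse transform above computes, again with plain arrays: the inverse DFT flips the sign of the exponent (the negi() call above) and divides by the length, so a forward transform followed by this inverse recovers the original vector up to rounding. The helper name naiveInverseDft1d is illustrative only.

  // Illustrative only: inverse 1d DFT with the conjugate exponent and 1/n scaling.
  static void naiveInverseDft1d(double[] re, double[] im) {
    int n = re.length;
    double[] outRe = new double[n];
    double[] outIm = new double[n];
    for (int k = 0; k < n; k++) {
      for (int j = 0; j < n; j++) {
        double angle = 2.0 * Math.PI * k * j / n; // note the positive sign vs. the forward DFT
        outRe[k] += re[j] * Math.cos(angle) - im[j] * Math.sin(angle);
        outIm[k] += re[j] * Math.sin(angle) + im[j] * Math.cos(angle);
      }
    }
    for (int k = 0; k < n; k++) { // 1/n scaling, mirroring the div(len) above
      re[k] = outRe[k] / n;
      im[k] = outIm[k] / n;
    }
  }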
Example #4
  /**
   * Discrete Fourier transform 2d
   *
   * @param input the input to transform
   * @param shape the shape of the output matrix
   * @return the discrete Fourier transform of the input
   */
  public static ComplexDoubleMatrix complexDisceteFourierTransform(NDArray input, int[] shape) {
    ComplexNDArray base;

    // pad
    if (ArrayUtil.anyLess(input.shape(), shape))
      base = MatrixUtil.complexPadWithZeros(input, shape);
    // truncation
    else if (ArrayUtil.anyMore(input.shape(), shape)) {
      base = new ComplexNDArray(shape);
      for (int i = 0; i < ArrayUtil.prod(shape); i++) base.put(i, input.get(i));
    } else base = new ComplexNDArray(input);

    ComplexNDArray temp = new ComplexNDArray(shape);
    ComplexNDArray ret = new ComplexNDArray(shape);

    for (int i = 0; i < base.columns; i++) {
      ComplexDoubleMatrix column = base.getColumn(i);
      temp.putColumn(i, complexDiscreteFourierTransform1d(column));
    }

    for (int i = 0; i < ret.rows; i++) {
      ComplexDoubleMatrix row = temp.getRow(i);
      ret.putRow(i, complexDiscreteFourierTransform1d(row));
    }
    return ret;
  }
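The pad/truncate branch above resizes the input to the requested output shape before transforming. A rough equivalent for a plain 2d double array, with a hypothetical helper name, might look like this (the library's truncation copies by linear index, so this is only an approximation of the idea):

  // Illustrative sketch of pad-with-zeros / truncate to a target shape; not library code.
  static double[][] fitToShape(double[][] in, int rows, int cols) {
    double[][] out = new double[rows][cols]; // zero-initialized, so padding is implicit
    for (int r = 0; r < Math.min(rows, in.length); r++) {
      for (int c = 0; c < Math.min(cols, in[r].length); c++) {
        out[r][c] = in[r][c]; // copy the overlapping region; anything outside is truncated
      }
    }
    return out;
  }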
  /**
   * Strips the dataset down to the specified labels and remaps them
   *
   * @param labels the labels to strip down to
   */
  public void filterAndStrip(int[] labels) {
    FloatDataSet filtered = filterBy(labels);
    List<Integer> newLabels = new ArrayList<>();

    // map new labels to index according to passed in labels
    Map<Integer, Integer> labelMap = new HashMap<>();

    for (int i = 0; i < labels.length; i++) labelMap.put(labels[i], i);

    // map examples
    for (int i = 0; i < filtered.numExamples(); i++) {
      int o2 = filtered.get(i).outcome();
      int outcome = labelMap.get(o2);
      newLabels.add(outcome);
    }

    FloatMatrix newLabelMatrix = new FloatMatrix(filtered.numExamples(), labels.length);

    if (newLabelMatrix.rows != newLabels.size())
      throw new IllegalStateException("Inconsistent label sizes");

    for (int i = 0; i < newLabelMatrix.rows; i++) {
      Integer i2 = newLabels.get(i);
      if (i2 == null) throw new IllegalStateException("Label not found on row " + i);
      FloatMatrix newRow = MatrixUtil.toOutcomeVectorFloat(i2, labels.length);
      newLabelMatrix.putRow(i, newRow);
    }

    setFirst(filtered.getFirst());
    setSecond(newLabelMatrix);
  }
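As a rough sketch of the remapping step above (hypothetical names, not the dl4j API): each retained label is mapped to its position in the labels array, and every example's outcome is rewritten as a one-hot row under that new indexing.

  import java.util.HashMap;
  import java.util.Map;

  // Illustrative only: remap raw labels to positions in `keep` and build one-hot rows.
  static float[][] remapToOneHot(int[] rawOutcomes, int[] keep) {
    Map<Integer, Integer> labelMap = new HashMap<>();
    for (int i = 0; i < keep.length; i++) labelMap.put(keep[i], i);

    float[][] oneHot = new float[rawOutcomes.length][keep.length];
    for (int row = 0; row < rawOutcomes.length; row++) {
      Integer idx = labelMap.get(rawOutcomes[row]);
      if (idx == null) throw new IllegalStateException("Label not found on row " + row);
      oneHot[row][idx] = 1.0f;
    }
    return oneHot;
  }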
  /**
   * Sets the outcome of a particular example
   *
   * @param example the example to set
   * @param label the label of the outcome
   */
  public void setOutcome(int example, int label) {
    if (example >= numExamples()) throw new IllegalArgumentException("No example at " + example);
    if (label >= numOutcomes() || label < 0) throw new IllegalArgumentException("Illegal label");

    FloatMatrix outcome = MatrixUtil.toOutcomeVectorFloat(label, numOutcomes());
    getSecond().putRow(example, outcome);
  }
Example #7
 /** Returns matrices of the right size for either binary or unary (terminal) classification */
 FloatMatrix randomClassificationMatrix() {
   // Leave the bias column with 0 values
   float range = 1.0f / (float) (Math.sqrt((float) numHidden));
   FloatMatrix ret = FloatMatrix.zeros(numOuts, numHidden + 1);
   FloatMatrix insert = MatrixUtil.rand(numOuts, numHidden, -range, range, rng);
   ret.put(interval(0, numOuts), interval(0, numHidden), insert);
   return SimpleBlas.scal(scalingForInit, ret);
 }
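The initialization above draws uniformly from [-1/sqrt(numHidden), 1/sqrt(numHidden)] and leaves the extra bias column at zero (the scalingForInit factor is omitted here). A minimal sketch with plain arrays and java.util.Random, using illustrative names rather than the library's:

  // Illustrative only: uniform init in [-range, range] with a zero bias column appended.
  static float[][] randomClassificationBlock(int numOuts, int numHidden, java.util.Random rng) {
    float range = 1.0f / (float) Math.sqrt(numHidden);
    float[][] w = new float[numOuts][numHidden + 1]; // last column is the bias, left at zero
    for (int r = 0; r < numOuts; r++) {
      for (int c = 0; c < numHidden; c++) {
        w[r][c] = (rng.nextFloat() * 2.0f - 1.0f) * range; // uniform in [-range, range]
      }
    }
    return w;
  }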
  public void normalizeZeroMeanZeroUnitVariance() {
    FloatMatrix columnMeans = getFirst().columnMeans();
    FloatMatrix columnStds = MatrixUtil.columnStdDeviation(getFirst());

    setFirst(getFirst().subiRowVector(columnMeans));
    columnStds.addi(1e-6f);
    setFirst(getFirst().diviRowVector(columnStds));
  }
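What the method above does, sketched on a plain 2d array: subtract each column's mean and divide by its standard deviation, with a small epsilon to avoid division by zero. Names here are illustrative only.

  // Illustrative only: per-column standardization (zero mean, unit variance).
  static void standardizeColumns(float[][] data) {
    int rows = data.length, cols = data[0].length;
    for (int c = 0; c < cols; c++) {
      float mean = 0f;
      for (int r = 0; r < rows; r++) mean += data[r][c];
      mean /= rows;

      float var = 0f;
      for (int r = 0; r < rows; r++) var += (data[r][c] - mean) * (data[r][c] - mean);
      float std = (float) Math.sqrt(var / rows) + 1e-6f; // epsilon mirrors the addi(1e-6f) above

      for (int r = 0; r < rows; r++) data[r][c] = (data[r][c] - mean) / std;
    }
  }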
Example #9
 public FloatMatrix getParameters() {
   return MatrixUtil.toFlattenedFloat(
       getNumParameters(),
       binaryTransform.values().iterator(),
       binaryClassification.values().iterator(),
       binaryFloatTensors.values().iterator(),
       unaryClassification.values().iterator(),
       featureVectors.values().iterator());
 }
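toFlattenedFloat above concatenates every parameter matrix, in iteration order, into one long vector; the gradient in getValueGradient below is flattened the same way so the two line up element for element. A rough sketch over plain arrays (hypothetical helper, not the dl4j API; the element order within each matrix may differ from the library's):

  // Illustrative only: flatten several matrices into one parameter vector, in order.
  static float[] flatten(int totalLength, java.util.List<float[][]> matrices) {
    float[] flat = new float[totalLength];
    int offset = 0;
    for (float[][] m : matrices) {
      for (float[] row : m) {
        System.arraycopy(row, 0, flat, offset, row.length);
        offset += row.length;
      }
    }
    return flat;
  }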
Example #10
  /**
   * 1d discrete Fourier transform. Note that this will throw an exception if the passed-in input
   * isn't a vector. See MATLAB's fft2 for more information.
   *
   * @param inputC the input to transform
   * @return the discrete Fourier transform of the passed-in input
   */
  public static ComplexFloatMatrix complexDiscreteFourierTransform1d(ComplexFloatMatrix inputC) {
    if (inputC.rows != 1 && inputC.columns != 1)
      throw new IllegalArgumentException("Illegal input: Must be a vector");

    float len = Math.max(inputC.rows, inputC.columns);
    ComplexFloat c2 = new ComplexFloat(0, -2).muli((float) FastMath.PI).divi(len);
    ComplexFloatMatrix range = MatrixUtil.complexRangeVector(0, len);
    ComplexFloatMatrix matrix = exp(range.mmul(range.transpose().mul(c2)));
    ComplexFloatMatrix complexRet =
        inputC.isRowVector() ? matrix.mmul(inputC) : inputC.mmul(matrix);
    return complexRet;
  }
  /**
   * Vectorizes the passed in text treating it as one document
   *
   * @param text the text to vectorize
   * @param label the label of the text
   * @return a dataset with a set of weights (relative to impl; could be word counts or tfidf
   *     scores)
   */
  @Override
  public DataSet vectorize(String text, String label) {
    Tokenizer tokenizer = tokenizerFactory.create(text);
    List<String> tokens = tokenizer.getTokens();
    DoubleMatrix input = new DoubleMatrix(1, vocab.size());
    for (int i = 0; i < tokens.size(); i++) {
      int idx = vocab.indexOf(tokens.get(i));
      if (idx >= 0) input.put(idx, wordCounts.getCount(tokens.get(i)));
    }

    DoubleMatrix labelMatrix = MatrixUtil.toOutcomeVector(labels.indexOf(label), labels.size());
    return new DataSet(input, labelMatrix);
  }
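The vectorizer above is a straightforward bag-of-words pass: tokens outside the vocabulary are dropped, and each in-vocabulary token writes its implementation-dependent weight at its vocabulary index. A self-contained count-based sketch with illustrative names:

  // Illustrative only: count-based bag-of-words over a fixed vocabulary.
  static double[] bagOfWords(java.util.List<String> tokens, java.util.List<String> vocab) {
    double[] counts = new double[vocab.size()];
    for (String token : tokens) {
      int idx = vocab.indexOf(token);
      if (idx >= 0) counts[idx] += 1.0; // unknown tokens are simply skipped
    }
    return counts;
  }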
Example #12
 static FloatMatrix randomWordVector(int size, RandomGenerator rng) {
   return MatrixUtil.uniformFloat(rng, 1, size);
 }
Example #13
 FloatMatrix randomTransformBlock() {
   float range = 1.0f / (float) (Math.sqrt((float) numHidden) * 2.0f);
   FloatMatrix ret = MatrixUtil.rand(numHidden, numHidden, -range, range, rng).add(identity);
   return ret;
 }
 /** Divides the input data set by the max number in each row */
 public void scale() {
   MatrixUtil.scaleByMax(getFirst());
 }
 public void normalize() {
   MatrixUtil.normalizeMatrix(getFirst());
 }
Example #16
  public FloatMatrix getValueGradient(int iterations) {

    // We use TreeMap for each of these so that they stay in a
    // canonical sorted order
    // TODO: factor out the initialization routines
    // binaryTD stands for Transform Derivatives
    final MultiDimensionalMap<String, String, FloatMatrix> binaryTD =
        MultiDimensionalMap.newTreeBackedMap();
    // the derivatives of the FloatTensors for the binary nodes
    final MultiDimensionalMap<String, String, FloatTensor> binaryFloatTensorTD =
        MultiDimensionalMap.newTreeBackedMap();
    // binaryCD stands for Classification Derivatives
    final MultiDimensionalMap<String, String, FloatMatrix> binaryCD =
        MultiDimensionalMap.newTreeBackedMap();

    // unaryCD stands for Classification Derivatives
    final Map<String, FloatMatrix> unaryCD = new TreeMap<>();

    // word vector derivatives
    final Map<String, FloatMatrix> wordVectorD = new TreeMap<>();

    for (MultiDimensionalMap.Entry<String, String, FloatMatrix> entry :
        binaryTransform.entrySet()) {
      int numRows = entry.getValue().rows;
      int numCols = entry.getValue().columns;

      binaryTD.put(entry.getFirstKey(), entry.getSecondKey(), new FloatMatrix(numRows, numCols));
    }

    if (!combineClassification) {
      for (MultiDimensionalMap.Entry<String, String, FloatMatrix> entry :
          binaryClassification.entrySet()) {
        int numRows = entry.getValue().rows;
        int numCols = entry.getValue().columns;

        binaryCD.put(entry.getFirstKey(), entry.getSecondKey(), new FloatMatrix(numRows, numCols));
      }
    }

    if (useFloatTensors) {
      for (MultiDimensionalMap.Entry<String, String, FloatTensor> entry :
          binaryFloatTensors.entrySet()) {
        int numRows = entry.getValue().rows();
        int numCols = entry.getValue().columns;
        int numSlices = entry.getValue().slices();

        binaryFloatTensorTD.put(
            entry.getFirstKey(),
            entry.getSecondKey(),
            new FloatTensor(numRows, numCols, numSlices));
      }
    }

    for (Map.Entry<String, FloatMatrix> entry : unaryClassification.entrySet()) {
      int numRows = entry.getValue().rows;
      int numCols = entry.getValue().columns;
      unaryCD.put(entry.getKey(), new FloatMatrix(numRows, numCols));
    }
    for (Map.Entry<String, FloatMatrix> entry : featureVectors.entrySet()) {
      int numRows = entry.getValue().rows;
      int numCols = entry.getValue().columns;
      wordVectorD.put(entry.getKey(), new FloatMatrix(numRows, numCols));
    }

    final List<Tree> forwardPropTrees = new CopyOnWriteArrayList<>();
    Parallelization.iterateInParallel(
        trainingTrees,
        new Parallelization.RunnableWithParams<Tree>() {

          public void run(Tree currentItem, Object[] args) {
            Tree trainingTree = new Tree(currentItem);
            trainingTree.connect(new ArrayList<>(currentItem.children()));
            // this will attach the error vectors and the node vectors
            // to each node in the tree
            forwardPropagateTree(trainingTree);
            forwardPropTrees.add(trainingTree);
          }
        },
        rnTnActorSystem);

    // TODO: we may find a big speedup by separating the derivatives and then summing
    final AtomicDouble error = new AtomicDouble(0);
    Parallelization.iterateInParallel(
        forwardPropTrees,
        new Parallelization.RunnableWithParams<Tree>() {

          public void run(Tree currentItem, Object[] args) {
            backpropDerivativesAndError(
                currentItem, binaryTD, binaryCD, binaryFloatTensorTD, unaryCD, wordVectorD);
            error.addAndGet(currentItem.errorSum());
          }
        },
        new Parallelization.RunnableWithParams<Tree>() {

          public void run(Tree currentItem, Object[] args) {}
        },
        rnTnActorSystem,
        new Object[] {binaryTD, binaryCD, binaryFloatTensorTD, unaryCD, wordVectorD});

    // scale the error by the number of sentences so that the
    // regularization isn't drowned out for large training batches
    float scale = (1.0f / trainingTrees.size());
    value = error.floatValue() * scale;

    value += scaleAndRegularize(binaryTD, binaryTransform, scale, regTransformMatrix);
    value += scaleAndRegularize(binaryCD, binaryClassification, scale, regClassification);
    value +=
        scaleAndRegularizeFloatTensor(
            binaryFloatTensorTD, binaryFloatTensors, scale, regTransformFloatTensor);
    value += scaleAndRegularize(unaryCD, unaryClassification, scale, regClassification);
    value += scaleAndRegularize(wordVectorD, featureVectors, scale, regWordVector);

    FloatMatrix derivative =
        MatrixUtil.toFlattenedFloat(
            getNumParameters(),
            binaryTD.values().iterator(),
            binaryCD.values().iterator(),
            binaryFloatTensorTD.values().iterator(),
            unaryCD.values().iterator(),
            wordVectorD.values().iterator());

    if (paramAdaGrad == null) paramAdaGrad = new AdaGradFloat(1, derivative.columns);

    derivative.muli(paramAdaGrad.getLearningRates(derivative));

    return derivative;
  }
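The scaleAndRegularize calls above follow the usual pattern for L2-regularized batch gradients: each accumulated derivative is scaled by 1/numTrees and an L2 term is added, while the returned value picks up the corresponding (reg / 2) * ||W||^2 penalty. A hedged sketch of that pattern for a single matrix (the standard formulation, not necessarily the exact dl4j implementation):

  // Illustrative only: scale a summed gradient and add an L2 penalty for one parameter matrix.
  // Returns the cost contribution (reg / 2) * ||param||^2 and updates grad in place.
  static float scaleAndRegularizeOne(float[][] grad, float[][] param, float scale, float reg) {
    float cost = 0f;
    for (int r = 0; r < grad.length; r++) {
      for (int c = 0; c < grad[r].length; c++) {
        grad[r][c] = grad[r][c] * scale + reg * param[r][c];
        cost += reg * param[r][c] * param[r][c] / 2.0f;
      }
    }
    return cost;
  }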
 public void roundInputToTheNearest(int numDecimalPlaces) {
   setFirst(MatrixUtil.roundToTheNearest(getFirst(), numDecimalPlaces));
 }