Example #1
  /**
   * Runs n epochs of backpropagation.
   *
   * @param in           input points
   * @param out          corresponding target output points
   * @param learningRate step size applied to each weight update
   * @param n            number of epochs to run
   */
  public void train(List<Point> in, List<Point> out, double learningRate, int n) {
    if (in.size() != out.size())
      throw new IllegalArgumentException(
          "Input lists must be the same size (were " + in.size() + " and " + out.size() + ").");

    for (int i : Series.series(n)) train(in, out, learningRate);
  }
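
A minimal usage sketch, assuming a hypothetical TwoLayerNetwork class exposing this method, a constructor taking (input, hidden, output) layer sizes, and a Point type with a varargs double constructor; none of these names are confirmed by the snippet itself:

  // Hypothetical XOR training run; class and constructor names are assumptions.
  List<Point> in  = List.of(new Point(0, 0), new Point(0, 1),
                            new Point(1, 0), new Point(1, 1));
  List<Point> out = List.of(new Point(0), new Point(1),
                            new Point(1), new Point(0));

  TwoLayerNetwork network = new TwoLayerNetwork(2, 4, 1);
  network.train(in, out, 0.1, 5000); // 5000 epochs at learning rate 0.1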
Example #2
  /**
   * Modifies this map through a single backpropagation iteration using the given error values on
   * the output nodes.
   *
   * @param error        error value for each output node
   * @param learningRate step size applied to the weight update
   */
  public void train(List<Double> error, double learningRate) {
    // Copy the output-node errors into a vector.
    RealVector eOut = new ArrayRealVector(error.size());
    for (int i : series(error.size())) eOut.setEntry(i, error.get(i));

    // gHidden: activation derivative at each non-bias node of the hidden layer
    gHidden.setSubVector(0, stateHidden.getSubVector(0, n)); // optimize

    for (int i : Series.series(gHidden.getDimension()))
      gHidden.setEntry(i, activation.derivative(gHidden.getEntry(i)));

    // Back-propagate the output error through the second weight layer, drop
    // the bias entry, and scale by the activation derivative.
    eHiddenL = weights1.transpose().operate(eOut);
    eHidden.setSubVector(0, eHiddenL.getSubVector(0, h));
    for (int i : series(h)) eHidden.setEntry(i, eHidden.getEntry(i) * gHidden.getEntry(i));

    // Weight gradients are outer products of each layer's error with its input
    // state; scaling by -learningRate turns them into gradient-descent steps.
    weights1Delta = MatrixTools.outer(eOut, stateHidden);
    weights1Delta = weights1Delta.scalarMultiply(-1.0 * learningRate); // optimize

    weights0Delta = MatrixTools.outer(eHidden, stateIn);
    weights0Delta = weights0Delta.scalarMultiply(-1.0 * learningRate);

    weights0 = weights0.add(weights0Delta);
    weights1 = weights1.add(weights1Delta);
  }
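
For reference, the scaled outer-product update that both MatrixTools.outer lines compute can also be written with Apache Commons Math's built-in RealVector.outerProduct; a self-contained sketch with made-up layer sizes and values:

  import org.apache.commons.math3.linear.ArrayRealVector;
  import org.apache.commons.math3.linear.RealMatrix;
  import org.apache.commons.math3.linear.RealVector;

  public class DeltaRuleSketch {
    public static void main(String[] args) {
      double learningRate = 0.1;

      // Output error (2 nodes) and hidden state including a bias entry (3 nodes).
      RealVector eOut = new ArrayRealVector(new double[] {0.5, -0.25});
      RealVector stateHidden = new ArrayRealVector(new double[] {0.8, 0.3, 1.0});

      // deltaW = -learningRate * (eOut outer stateHidden): a 2x3 gradient-descent
      // step, matching the shape of a hidden-to-output weight matrix.
      RealMatrix delta = eOut.outerProduct(stateHidden).scalarMultiply(-learningRate);

      System.out.println(delta);
    }
  }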