Example #1
  public static void main(String[] args) {
    Logging.stopConsoleLogging();
    NeuralDataSet trainingSet = new BasicNeuralDataSet(XOR_INPUT, XOR_IDEAL);
    BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);
    ResilientPropagation train = new ResilientPropagation(network, trainingSet);
    train.addStrategy(new RequiredImprovementStrategy(5));

    System.out.println("Perform initial train.");
    EncogUtility.trainToError(train, network, trainingSet, 0.01);
    // Pause training and capture the trainer's internal RPROP state.
    TrainingContinuation cont = train.pause();
    // Dump the captured state: the last gradients and the per-weight update values.
    System.out.println(
        Arrays.toString((double[]) cont.getContents().get(ResilientPropagation.LAST_GRADIENTS)));
    System.out.println(
        Arrays.toString((double[]) cont.getContents().get(ResilientPropagation.UPDATE_VALUES)));

    try {
      SerializeObject.save("resume.ser", cont);
      cont = (TrainingContinuation) SerializeObject.load("resume.ser");
    } catch (Exception ex) {
      ex.printStackTrace();
    }

    System.out.println(
        "Now trying a second train, continuing from the first. It should stop after one iteration.");
    ResilientPropagation train2 = new ResilientPropagation(network, trainingSet);
    train2.resume(cont);
    EncogUtility.trainToError(train2, network, trainingSet, 0.01);
  }
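
Example #1 assumes XOR_INPUT and XOR_IDEAL constants on the enclosing class; they are not shown in the snippet. A minimal sketch of what they would typically look like (assumed here, following the standard XOR truth table used across the Encog samples):

  public static final double[][] XOR_INPUT = {
      {0.0, 0.0}, {1.0, 0.0}, {0.0, 1.0}, {1.0, 1.0}};

  public static final double[][] XOR_IDEAL = {
      {0.0}, {1.0}, {1.0}, {0.0}};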
Example #2
  public static void main(String[] args) {
    Logging.stopConsoleLogging();
    BasicNetwork network = generateNetwork();
    NeuralDataSet data = generateTraining();

    double rprop = evaluateRPROP(network, data);
    double mprop = evaluateMPROP(network, data);
    double factor = rprop / mprop;
    System.out.println("Factor improvement:" + factor);
  }
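
Example #2 compares RPROP against Encog's multithreaded propagation through helpers (generateNetwork, generateTraining, evaluateRPROP, evaluateMPROP) that are not shown. Assuming the two evaluate helpers return elapsed training time, a minimal sketch of the RPROP side could simply time a fixed number of iterations (the count of 20 below is an arbitrary assumption):

  public static double evaluateRPROP(BasicNetwork network, NeuralDataSet data) {
    // Assumed sketch: time a fixed number of RPROP training iterations.
    ResilientPropagation train = new ResilientPropagation(network, data);
    long start = System.currentTimeMillis();
    for (int i = 0; i < 20; i++) {
      train.iteration();
    }
    return System.currentTimeMillis() - start;
  }

The MPROP helper would mirror this with the multithreaded trainer, and the factor printed in main would then be the ratio of the two timings.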
Example #3
  public static void main(String[] args) {
    Logging.stopConsoleLogging();

    try {
      EncogPersistence program = new EncogPersistence();
      program.trainAndSave();
      program.loadAndEvaluate();
    } catch (Throwable t) {
      t.printStackTrace();
    }
  }
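
trainAndSave and loadAndEvaluate are instance methods of the EncogPersistence example class and are not reproduced above. As a simplified stand-in (an assumption; it reuses the SerializeObject helper from Example #1 rather than whatever persistence mechanism the original class actually uses), the pair could look like:

  public void trainAndSave() throws Exception {
    // Train a small XOR network and write it to disk.
    BasicNetwork network = EncogUtility.simpleFeedForward(2, 4, 0, 1, false);
    NeuralDataSet trainingSet = new BasicNeuralDataSet(XOR_INPUT, XOR_IDEAL);
    ResilientPropagation train = new ResilientPropagation(network, trainingSet);
    EncogUtility.trainToError(train, network, trainingSet, 0.01);
    // Assumes BasicNetwork is Serializable, as TrainingContinuation is in Example #1.
    SerializeObject.save("network.ser", network);
  }

  public void loadAndEvaluate() throws Exception {
    // Read the network back and report its error on the same data.
    BasicNetwork network = (BasicNetwork) SerializeObject.load("network.ser");
    NeuralDataSet trainingSet = new BasicNeuralDataSet(XOR_INPUT, XOR_IDEAL);
    System.out.println("Loaded network error: " + network.calculateError(trainingSet));
  }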
Example #4
 public static void main(final String[] args) {
   Logging.stopConsoleLogging();
   if (args.length < 1) {
     System.out.println("Must specify command file.  See source for format.");
   } else {
     try {
       final ImageNeuralNetwork program = new ImageNeuralNetwork();
       program.execute(args[0]);
     } catch (final Exception e) {
       e.printStackTrace();
     }
   }
 }
Example #5
  public static void main(final String[] args) {
    Logging.stopConsoleLogging();
    final TemporalXOR temp = new TemporalXOR();
    final NeuralDataSet trainingSet = temp.generate(100);

    final BasicNetwork elmanNetwork = ElmanXOR.createElmanNetwork();
    final BasicNetwork feedforwardNetwork = ElmanXOR.createFeedforwardNetwork();

    final double elmanError = ElmanXOR.trainNetwork("Elman", elmanNetwork, trainingSet);
    final double feedforwardError =
        ElmanXOR.trainNetwork("Feedforward", feedforwardNetwork, trainingSet);

    System.out.println("Best error rate with Elman Network: " + elmanError);
    System.out.println("Best error rate with Feedforward Network: " + feedforwardError);
    System.out.println(
        "Elman should be able to get into the 30% range,\nfeedforward should not go below 50%.\nThe recurrent Elman net can learn better in this case.");
    System.out.println(
        "If your results are not as good, try rerunning, or perhaps training longer.");
  }
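
createElmanNetwork, createFeedforwardNetwork, and trainNetwork are helpers of the ElmanXOR class and are omitted above. A hedged sketch of the Elman side, built with Encog's ElmanPattern (the 1-2-1 layer sizes are assumptions chosen for the one-input temporal XOR stream):

  static BasicNetwork createElmanNetwork() {
    // Assumed sketch: a simple recurrent (Elman) network with sigmoid activations.
    ElmanPattern pattern = new ElmanPattern();
    pattern.setActivationFunction(new ActivationSigmoid());
    pattern.setInputNeurons(1);
    pattern.addHiddenLayer(2);
    pattern.setOutputNeurons(1);
    return (BasicNetwork) pattern.generate();
  }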
Example #6
 /**
  * The main method.
  *
  * @param args Not used.
  */
 public static void main(final String[] args) {
   Logging.stopConsoleLogging();
   (new OCR()).setVisible(true);
 }