Code Example #1
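Trains a small feed-forward network on the XOR truth table, with each two-bit input duplicated to fill four input nodes. Weight selection is cast as an optimization problem and solved with randomized hill climbing; the trained network's output is then printed next to each expected label.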
 /**
  * Tests a feed-forward network on the classic XOR problem
  *
  * @param args ignored
  */
 public static void main(String[] args) {
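   // XOR truth table: each two-bit input (a, b) appears as (a, b, a, b) so that it fills
   // the network's four input nodes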
   double[][][] data = {
     {{1, 1, 1, 1}, {0}},
     {{1, 0, 1, 0}, {1}},
     {{0, 1, 0, 1}, {1}},
     {{0, 0, 0, 0}, {0}}
   };
   Instance[] patterns = new Instance[data.length];
   for (int i = 0; i < patterns.length; i++) {
     patterns[i] = new Instance(data[i][0]);
     patterns[i].setLabel(new Instance(data[i][1]));
   }
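   // Build a 4-3-1 classification network; its weight vector is what the optimizer searches over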
   FeedForwardNeuralNetworkFactory factory = new FeedForwardNeuralNetworkFactory();
   FeedForwardNetwork network = factory.createClassificationNetwork(new int[] {4, 3, 1});
   ErrorMeasure measure = new SumOfSquaresError();
   DataSet set = new DataSet(patterns);
   NeuralNetworkOptimizationProblem nno =
       new NeuralNetworkOptimizationProblem(set, network, measure);
   OptimizationAlgorithm o = new RandomizedHillClimbing(nno);
   FixedIterationTrainer fit = new FixedIterationTrainer(o, 5000);
   fit.train();
   Instance opt = o.getOptimal();
   network.setWeights(opt.getData());
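   // Feed each training pattern through the trained network and print the expected label
   // alongside the network's actual output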
   for (int i = 0; i < patterns.length; i++) {
     network.setInputValues(patterns[i].getData());
     network.run();
     System.out.println("~~");
     System.out.println(patterns[i].getLabel());
     System.out.println(network.getOutputValues());
   }
 }
Code Example #2
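Benchmarks MIMIC on a knapsack instance with hard-coded item weights and volumes, sweeping the per-iteration sample count from 10 to 1000 and printing the fitness and training time for each setting.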
  /**
   * The test main
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    int[] copies = new int[NUM_ITEMS];
    Arrays.fill(copies, COPIES_EACH);
    double[] weights = {
      14.006873891334399,
      0.9388306474803154,
      26.119866173905713,
      20.546411641966962,
      12.456703411391896,
      32.322455327605255,
      38.750240535343956,
      23.859794102127896,
      45.011525134982286,
      10.776070365757933,
      46.065756634733006,
      7.828572903429926,
      24.743263676002634,
      33.20916476805507,
      16.51246534149665,
      23.443925006858134,
      18.57022530279454,
      5.706896488446161,
      23.44607697986756,
      1.5545804205003566,
      27.0302859936824,
      21.097619402619628,
      43.60173756385764,
      49.44832347485482,
      25.910156034801474,
      27.91751206001118,
      36.658173210220255,
      40.881378221999206,
      48.83228771437947,
      35.49885544313467,
      16.247757455771072,
      25.53223124143824,
      2.400993598957707,
      12.408533226752189,
      35.26405639169894,
      46.35644830194322,
      18.009317731328604,
      47.96332151014204,
      20.81843428091102,
      15.819063866703608
    };
    double[] volumes = {
      44.57809711968351,
      1.988378753951514,
      47.593739208727456,
      36.569659867427994,
      1.5845284028427165,
      41.7861748607473,
      24.69594875244368,
      43.03123587633294,
      7.248980459072457,
      23.327415667901835,
      36.105898702916086,
      24.87957120462097,
      36.910177249731724,
      27.30395021307583,
      37.74427091808214,
      21.681239410167937,
      8.318371979533667,
      16.88207551035857,
      34.91767868192272,
      9.456202413374893,
      47.184521478105424,
      35.65391669513947,
      7.1158444301557155,
      44.53433689634305,
      49.16774307587011,
      13.564617532166368,
      38.36035512523829,
      2.3636140632733618,
      38.08282614908438,
      49.310535335495906,
      42.75495808871727,
      28.422383043559908,
      31.486561856652965,
      7.283678338886252,
      5.795560154240054,
      38.749456539160306,
      37.74109110751328,
      0.7802313639065139,
      19.468811616414722,
      17.029576884574187
    };
    int[] ranges = new int[NUM_ITEMS];
    Arrays.fill(ranges, COPIES_EACH + 1);
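    // Knapsack fitness over the hard-coded item weights and volumes above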
    EvaluationFunction ef =
        new KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies);
    System.out.println("Trial;Fitness;RunningTime");
    for (int x = 10; x <= 1000; x = x + 10) {
      Distribution odd = new DiscreteUniformDistribution(ranges);
      NeighborFunction nf = new DiscreteChangeOneNeighbor(ranges);
      MutationFunction mf = new DiscreteChangeOneMutation(ranges);
      CrossoverFunction cf = new UniformCrossOver();
      Distribution df = new DiscreteDependencyTree(.1, ranges);
      HillClimbingProblem hcp = new GenericHillClimbingProblem(ef, odd, nf);
      GeneticAlgorithmProblem gap = new GenericGeneticAlgorithmProblem(ef, odd, mf, cf);
      ProbabilisticOptimizationProblem pop =
          new GenericProbabilisticOptimizationProblem(ef, odd, df);

      // MIMIC with x samples per iteration, keeping the best x / 5
      MIMIC mimic = new MIMIC(x, x / 5, pop);
      FixedIterationTrainer fit = new FixedIterationTrainer(mimic, 500);
      long start = System.nanoTime();
      fit.train();
      long end = System.nanoTime();
      double trainingTimeSeconds = (end - start) / 1e9;

      // Print out the values for each trial
      System.out.println(x + ";" + ef.value(mimic.getOptimal()) + ";" + trainingTimeSeconds);
    }
  }
Code Example #3
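Trains a neural-network classifier on letter-recognition data read from CSV, again casting weight selection as an optimization problem for randomized hill climbing. The numbered comments walk through each stage, from data loading to accuracy reporting.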
  public void run(int iterations) throws Exception {
    // 1) Construct data instances for training.  These will also be run
    //    through the network at the bottom to verify the output
    CSVDataSetReader reader = new CSVDataSetReader("data/letter_training_new.data");
    DataSet set = reader.read();
    LabelSplitFilter flt = new LabelSplitFilter();
    flt.filter(set);
    DataSetLabelBinarySeperator.seperateLabels(set);
    DataSetDescription desc = set.getDescription();
    DataSetDescription labelDesc = desc.getLabelDescription();

    // 2) Instantiate a network using the FeedForwardNeuralNetworkFactory.  This network
    //    will be our classifier.
    FeedForwardNeuralNetworkFactory factory = new FeedForwardNeuralNetworkFactory();
    // 2a) These numbers correspond to the number of nodes in each layer: one input node per
    //     attribute, a hidden layer sized by the factory's heuristic, and one output node per
    //     discrete label value.
    FeedForwardNetwork network =
        factory.createClassificationNetwork(
            new int[] {
              desc.getAttributeCount(),
              factory.getOptimalHiddenLayerNodes(desc, labelDesc),
              labelDesc.getDiscreteRange()
            });

    // 3) Instantiate a measure, which is used to evaluate each possible set of weights.
    ErrorMeasure measure = new SumOfSquaresError();

    // 4) The DataSet, which adapts a set of instances to the optimization problem, was already
    //    constructed by the reader in step 1, so it is passed along directly.

    // 5) Instantiate an optimization problem, which is used to specify the dataset, evaluation
    //    function, mutator and crossover function (for Genetic Algorithms), and any other
    //    parameters used in optimization.
    NeuralNetworkOptimizationProblem nno =
        new NeuralNetworkOptimizationProblem(set, network, measure);

    // 6) Instantiate a specific OptimizationAlgorithm, which defines how we pick our next potential
    //    hypothesis.
    OptimizationAlgorithm o = new RandomizedHillClimbing(nno);

    // 7) Instantiate a trainer.  The FixedIterationTrainer takes another trainer (in this case,
    //    an OptimizationAlgorithm) and executes it a specified number of times.
    FixedIterationTrainer fit = new FixedIterationTrainer(o, iterations);

    // 8) Run the trainer.  This may take a little while to run, depending on the
    //    OptimizationAlgorithm, the size of the data, and the number of iterations.
    fit.train();

    // 9) Once training is done, get the optimal solution from the OptimizationAlgorithm.  These
    //    are the optimal weights found for this network.
    Instance opt = o.getOptimal();
    network.setWeights(opt.getData());

    // 10) Run the training data through the network with the weights discovered through
    //     optimization, and print out the expected label and result of the classifier for each
    //     instance.
    int[] labels = {0, 1};
    TestMetric acc = new AccuracyTestMetric();
    TestMetric cm = new ConfusionMatrixTestMetric(labels);
    Tester t = new NeuralNetworkTester(network, acc, cm);
    t.test(set.getInstances());

    acc.printResults();
  }
Code Example #4
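Compares randomized hill climbing, simulated annealing, a standard genetic algorithm, and MIMIC on a random traveling-salesman instance whose size and repeat count come from the command line, printing tour fitness and wall-clock seconds for each run.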
  public static void main(String[] args) {
    if (args.length < 2) {
      System.out.println("Provide a input size and repeat count");
      System.exit(0);
    }
    int N = Integer.parseInt(args[0]);
    if (N < 0) {
      System.out.println(" N cannot be negaitve.");
      System.exit(0);
    }
    Random random = new Random();
    // create the random points
    double[][] points = new double[N][2];
    for (int i = 0; i < points.length; i++) {
      points[i][0] = random.nextDouble();
      points[i][1] = random.nextDouble();
    }
    int iterations = Integer.parseInt(args[1]);
    // for rhc, sa, and ga we use a permutation based encoding
    TravelingSalesmanEvaluationFunction ef = new TravelingSalesmanRouteEvaluationFunction(points);
    Distribution odd = new DiscretePermutationDistribution(N);
    NeighborFunction nf = new SwapNeighbor();
    MutationFunction mf = new SwapMutation();
    CrossoverFunction cf = new TravelingSalesmanCrossOver(ef);
    HillClimbingProblem hcp = new GenericHillClimbingProblem(ef, odd, nf);
    GeneticAlgorithmProblem gap = new GenericGeneticAlgorithmProblem(ef, odd, mf, cf);

    System.out.println("Randomized Hill Climbing\n---------------------------------");
    for (int i = 0; i < iterations; i++) {
      RandomizedHillClimbing rhc = new RandomizedHillClimbing(hcp);
      long t = System.nanoTime();
      FixedIterationTrainer fit = new FixedIterationTrainer(rhc, 200000);
      fit.train();
      System.out.println(
          ef.value(rhc.getOptimal()) + ", " + (((double) (System.nanoTime() - t)) / 1e9d));
    }

    System.out.println("Simulated Annealing \n---------------------------------");
    for (int i = 0; i < iterations; i++) {
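      // Start at temperature 1E12 and cool by a factor of .95 each iteration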
      SimulatedAnnealing sa = new SimulatedAnnealing(1E12, .95, hcp);
      long t = System.nanoTime();
      FixedIterationTrainer fit = new FixedIterationTrainer(sa, 200000);
      fit.train();
      System.out.println(
          ef.value(sa.getOptimal()) + ", " + (((double) (System.nanoTime() - t)) / 1e9d));
    }

    System.out.println("Genetic Algorithm\n---------------------------------");
    for (int i = 0; i < iterations; i++) {
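      // Population of 200, mating 150 and mutating 10 each generation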
      StandardGeneticAlgorithm ga = new StandardGeneticAlgorithm(200, 150, 10, gap);
      long t = System.nanoTime();
      FixedIterationTrainer fit = new FixedIterationTrainer(ga, 1000);
      fit.train();
      System.out.println(
          ef.value(ga.getOptimal()) + ", " + (((double) (System.nanoTime() - t)) / 1e9d));
    }

    System.out.println("MIMIC \n---------------------------------");

    // for mimic we use a sort encoding
    int[] ranges = new int[N];
    Arrays.fill(ranges, N);
    odd = new DiscreteUniformDistribution(ranges);
    Distribution df = new DiscreteDependencyTree(.1, ranges);

    for (int i = 0; i < iterations; i++) {
      ProbabilisticOptimizationProblem pop =
          new GenericProbabilisticOptimizationProblem(ef, odd, df);
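      // Draw 200 samples per iteration and keep the best 60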
      MIMIC mimic = new MIMIC(200, 60, pop);
      long t = System.nanoTime();
      FixedIterationTrainer fit = new FixedIterationTrainer(mimic, 1000);
      fit.train();
      System.out.println(
          ef.value(mimic.getOptimal()) + ", " + (((double) (System.nanoTime() - t)) / 1e9d));
    }
  }
Code Example #5
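Runs the same four algorithms on a randomly generated knapsack instance across a range of iteration budgets, reporting the fitness reached and the elapsed milliseconds for each algorithm and budget.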
  /**
   * The test main
   *
   * @param args ignored
   */
  public static void main(String[] args) {
    int[] copies = new int[NUM_ITEMS];
    Arrays.fill(copies, COPIES_EACH);
    double[] weights = new double[NUM_ITEMS];
    double[] volumes = new double[NUM_ITEMS];
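    // Randomize the item weights and volumes; random, MAX_WEIGHT, and MAX_VOLUME are
    // assumed to be fields of the enclosing class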
    for (int i = 0; i < NUM_ITEMS; i++) {
      weights[i] = random.nextDouble() * MAX_WEIGHT;
      volumes[i] = random.nextDouble() * MAX_VOLUME;
    }
    int[] ranges = new int[NUM_ITEMS];
    Arrays.fill(ranges, COPIES_EACH + 1);
    EvaluationFunction ef =
        new KnapsackEvaluationFunction(weights, volumes, KNAPSACK_VOLUME, copies);
    Distribution odd = new DiscreteUniformDistribution(ranges);
    NeighborFunction nf = new DiscreteChangeOneNeighbor(ranges);
    MutationFunction mf = new DiscreteChangeOneMutation(ranges);
    CrossoverFunction cf = new UniformCrossOver();
    Distribution df = new DiscreteDependencyTree(.1, ranges);
    HillClimbingProblem hcp = new GenericHillClimbingProblem(ef, odd, nf);
    GeneticAlgorithmProblem gap = new GenericGeneticAlgorithmProblem(ef, odd, mf, cf);
    ProbabilisticOptimizationProblem pop = new GenericProbabilisticOptimizationProblem(ef, odd, df);

    for (int baseIteration : new int[] {1000, 10000, 50000, 100000, 150000, 200000}) {

      long startTime = System.currentTimeMillis();
      RandomizedHillClimbing rhc = new RandomizedHillClimbing(hcp);
      FixedIterationTrainer fit = new FixedIterationTrainer(rhc, baseIteration);
      fit.train();
      long endTime = System.currentTimeMillis();
      System.out.println(
          "RHC\t"
              + baseIteration
              + "\t"
              + (endTime - startTime)
              + "\t"
              + ef.value(rhc.getOptimal()));

      startTime = System.currentTimeMillis();
      SimulatedAnnealing sa = new SimulatedAnnealing(100, .95, hcp);
      fit = new FixedIterationTrainer(sa, baseIteration);
      fit.train();
      endTime = System.currentTimeMillis();
      System.out.println(
          "SA\t" + baseIteration + "\t" + (endTime - startTime) + "\t" + ef.value(sa.getOptimal()));

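      // GA and MIMIC budgets are divided by 100, presumably because each of their iterations
      // evaluates an entire population rather than a single candidate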
      startTime = System.currentTimeMillis();
      StandardGeneticAlgorithm ga = new StandardGeneticAlgorithm(100, 75, 12, gap);
      fit = new FixedIterationTrainer(ga, baseIteration / 100);
      fit.train();
      endTime = System.currentTimeMillis();
      System.out.println(
          "GA\t" + baseIteration + "\t" + (endTime - startTime) + "\t" + ef.value(ga.getOptimal()));

      startTime = System.currentTimeMillis();
      MIMIC mimic = new MIMIC(100, 50, pop);
      fit = new FixedIterationTrainer(mimic, baseIteration / 100);
      fit.train();
      endTime = System.currentTimeMillis();
      System.out.println(
          "MIMIC\t"
              + baseIteration
              + "\t"
              + (endTime - startTime)
              + "\t"
              + ef.value(mimic.getOptimal()));
    }
  }