Example #1
  public NeuralNetwork(int input, int hidden, int output) {
    this.layers = new int[] {input, hidden, output};
    df = new DecimalFormat("#0.000#");
    ef = new DecimalFormat("#");

    // Create all neurons and connections. The connections themselves are created inside the Neuron class.
    for (int i = 0; i < layers.length; i++) {
      if (i == 0) { // input layer
        for (int j = 0; j < layers[i]; j++) {
          Neuron neuron = new Neuron();
          inputLayer.add(neuron);
        }
      } else if (i == 1) { // hidden layer
        for (int j = 0; j < layers[i]; j++) {
          Neuron neuron = new Neuron();
          neuron.addInConnectionsS(inputLayer);
          neuron.addBiasConnection(bias);
          hiddenLayer.add(neuron);
        }
      } else if (i == 2) { // output layer
        for (int j = 0; j < layers[i]; j++) {
          Neuron neuron = new Neuron();
          neuron.addInConnectionsS(hiddenLayer);
          neuron.addBiasConnection(bias);
          outputLayer.add(neuron);
        }
      } else {
        System.out.println("!Error NeuralNetwork init");
      }
    }

    // initialize random weights
    for (Neuron neuron : hiddenLayer) {
      ArrayList<Connection> connections = neuron.getAllInConnections();
      for (Connection conn : connections) {
        double newWeight = getRandom();
        conn.setWeight(newWeight);
      }
    }
    for (Neuron neuron : outputLayer) {
      ArrayList<Connection> connections = neuron.getAllInConnections();
      for (Connection conn : connections) {
        double newWeight = getRandom();
        conn.setWeight(newWeight);
      }
    }

    // reset id counters
    Neuron.counter = 0;
    Connection.counter = 0;

    if (isTrained) {
      trainedWeights();
      updateAllWeights();
    }
  }
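A quick note on the constructor above: it wires a fixed three-layer topology, so, for example,

  // 2 input neurons, 4 hidden neurons, 1 output neuron
  NeuralNetwork network = new NeuralNetwork(2, 4, 1);

creates 2*4 + 4*1 = 12 inter-neuron connections plus one bias connection for each hidden and output neuron.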
Example #2
 void initialize(DepressingSynapseFilter filter) {
   for (int i = 0; i < cells.length; i++) {
     for (int j = 0; j < cells[i].length; j++) {
       for (Neuron n : cells[i][j]) {
         if (n != null) {
           n.initialize(filter);
         }
       }
     }
   }
 }
Example #3
 void reset() {
   for (int i = 0; i < cells.length; i++) {
     for (int j = 0; j < cells[i].length; j++) {
       for (Neuron n : cells[i][j]) {
         if (n != null) {
           n.reset();
         }
       }
     }
   }
 }
Example #4
  public void printWeightUpdate() {

    String fileName = "WeightFile.txt";
    try {

      FileWriter fileWriter = new FileWriter(fileName);

      BufferedWriter bufferedWriter = new BufferedWriter(fileWriter);

      System.out.println(
          "printWeightUpdate, put this in trainedWeights() and set isTrained to true");
      // weights for the hidden layer
      for (Neuron n : hiddenLayer) {
        ArrayList<Connection> connections = n.getAllInConnections();
        for (Connection con : connections) {
          String w = df.format(con.getWeight());
          System.out.println(
              "weightUpdate.put(weightKey(" + n.id + ", " + con.id + "), " + w + ");");

          bufferedWriter.write(ef.format(n.id));
          bufferedWriter.write(" ");
          bufferedWriter.write(ef.format(con.id));
          bufferedWriter.write(" ");
          bufferedWriter.write(w);
          bufferedWriter.newLine();
        }
      }
      // weights for the output layer
      for (Neuron n : outputLayer) {
        ArrayList<Connection> connections = n.getAllInConnections();
        for (Connection con : connections) {
          String w = df.format(con.getWeight());
          System.out.println(
              "weightUpdate.put(weightKey(" + n.id + ", " + con.id + "), " + w + ");");

          bufferedWriter.write(ef.format(n.id));
          bufferedWriter.write(" ");
          bufferedWriter.write(ef.format(con.id));
          bufferedWriter.write(" ");
          bufferedWriter.write(w);
          bufferedWriter.newLine();
        }
      }
      System.out.println();
      bufferedWriter.close();
    } catch (IOException ex) {

      System.out.println("Error writing to file " + fileName);
    }
  }
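printWeightUpdate() writes one "neuronId connectionId weight" triple per line. These examples do not show how WeightFile.txt is read back in; a hypothetical loader, assuming the same weightUpdate map and weightKey() helper that updateAllWeights() uses, might look like this:

  // Hypothetical sketch: reads the "neuronId connectionId weight" lines back
  // into the weightUpdate map. weightKey() is assumed to match the helper
  // used in updateAllWeights(); neither is shown in these examples.
  public void loadWeights(String fileName) {
    try (BufferedReader reader = new BufferedReader(new FileReader(fileName))) {
      String line;
      while ((line = reader.readLine()) != null) {
        String[] parts = line.trim().split("\\s+");
        String key = weightKey(Integer.parseInt(parts[0]), Integer.parseInt(parts[1]));
        weightUpdate.put(key, Double.parseDouble(parts[2]));
      }
    } catch (IOException ex) {
      System.out.println("Error reading from file " + fileName);
    }
  }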
Example #5
  /**
   * Propagate the error back from all outputs.
   *
   * @param expectedOutput the desired output values; first the partial derivative of the error
   *     with respect to each of the weights leading into the output neurons is calculated. The
   *     bias is also updated here.
   */
  public void applyBackpropagation(double[] expectedOutput) {

    // sanity check: clamp expected values into the open interval (0, 1)
    for (int i = 0; i < expectedOutput.length; i++) {
      double d = expectedOutput[i];
      if (d < 0 || d > 1) {
        if (d < 0) expectedOutput[i] = epsilon;
        else expectedOutput[i] = 1 - epsilon;
      }
    }

    int i = 0;
    for (Neuron n : outputLayer) {
      ArrayList<Connection> connections = n.getAllInConnections();
      for (Connection con : connections) {
        double ak = n.getOutput();
        double ai = con.leftNeuron.getOutput();
        double desiredOutput = expectedOutput[i];

        double partialDerivative = -ak * (1 - ak) * ai * (desiredOutput - ak);
        double deltaWeight = -learningRate * partialDerivative;
        double newWeight = con.getWeight() + deltaWeight;
        con.setDeltaWeight(deltaWeight);
        con.setWeight(newWeight + momentum * con.getPrevDeltaWeight());
      }
      i++;
    }

    // update weights for the hidden layer
    for (Neuron n : hiddenLayer) {
      ArrayList<Connection> connections = n.getAllInConnections();
      for (Connection con : connections) {
        double aj = n.getOutput();
        double ai = con.leftNeuron.getOutput();
        double sumKoutputs = 0;
        int j = 0;
        for (Neuron out_neu : outputLayer) {
          double wjk = out_neu.getConnection(n.id).getWeight();
          double desiredOutput = expectedOutput[j];
          double ak = out_neu.getOutput();
          j++;
          sumKoutputs = sumKoutputs + (-(desiredOutput - ak) * ak * (1 - ak) * wjk);
        }

        double partialDerivative = aj * (1 - aj) * ai * sumKoutputs;
        double deltaWeight = -learningRate * partialDerivative;
        double newWeight = con.getWeight() + deltaWeight;
        con.setDeltaWeight(deltaWeight);
        con.setWeight(newWeight + momentum * con.getPrevDeltaWeight());
      }
    }
  }
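For reference, these two loops implement the standard delta rule for sigmoid units with squared error E = (1/2) * sum_k (t_k - a_k)^2. In LaTeX form, for a weight w_{ik} into output neuron k the code computes

$$\frac{\partial E}{\partial w_{ik}} = -a_k(1-a_k)\,a_i\,(t_k - a_k), \qquad \Delta w_{ik} = -\eta\,\frac{\partial E}{\partial w_{ik}},$$

and for a weight w_{ij} into hidden neuron j

$$\frac{\partial E}{\partial w_{ij}} = a_j(1-a_j)\,a_i \sum_k \bigl(-(t_k - a_k)\,a_k(1-a_k)\,w_{jk}\bigr),$$

with the momentum term mu * prevDeltaWeight added on top of the updated weight in both cases (learningRate plays the role of eta, momentum the role of mu).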
Example #6
 public void printAllWeights() {
   System.out.println("printAllWeights");
   // weights for the hidden layer
   for (Neuron n : hiddenLayer) {
     ArrayList<Connection> connections = n.getAllInConnections();
     for (Connection con : connections) {
       double w = con.getWeight();
       System.out.println("n=" + n.id + " c=" + con.id + " w=" + w);
     }
   }
   // weights for the output layer
   for (Neuron n : outputLayer) {
     ArrayList<Connection> connections = n.getAllInConnections();
     for (Connection con : connections) {
       double w = con.getWeight();
       System.out.println("n=" + n.id + " c=" + con.id + " w=" + w);
     }
   }
   System.out.println();
 }
Example #7
 /** Take the stored values from the hash table and apply them to all weights. */
 public void updateAllWeights() {
   // update weights for the output layer
   for (Neuron n : outputLayer) {
     ArrayList<Connection> connections = n.getAllInConnections();
     for (Connection con : connections) {
       String key = weightKey(n.id, con.id);
       double newWeight = weightUpdate.get(key);
       con.setWeight(newWeight);
     }
   }
   // update weights for the hidden layer
   for (Neuron n : hiddenLayer) {
     ArrayList<Connection> connections = n.getAllInConnections();
     for (Connection con : connections) {
       String key = weightKey(n.id, con.id);
       double newWeight = weightUpdate.get(key);
       con.setWeight(newWeight);
     }
   }
 }
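updateAllWeights() relies on a weightUpdate map and a weightKey() helper that are not shown in these examples. A minimal sketch of what they might look like (the key encoding here is an assumption; any encoding that is unique per neuron/connection pair would do):

  // Hypothetical declarations; the real class defines these elsewhere.
  HashMap<String, Double> weightUpdate = new HashMap<String, Double>();

  public String weightKey(int neuronId, int conId) {
    // Any unique encoding of the (neuron, connection) pair works.
    return "N" + neuronId + "_C" + conId;
  }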
Example #8
 /** Calculate the output of the neural network based on the input (the forward pass). */
 public void activate() {
   for (Neuron n : hiddenLayer) n.calculateOutput();
   for (Neuron n : outputLayer) n.calculateOutput();
 }
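A typical forward-pass call sequence might look as follows. Note that setInput() and getOutputs() are hypothetical names standing in for whatever the class uses to feed and read the layers; only the constructor and activate() appear in these examples:

  // Hypothetical usage sketch: setInput() and getOutputs() are assumptions.
  NeuralNetwork network = new NeuralNetwork(2, 4, 1);
  network.setInput(new double[] {1.0, 0.0}); // load the input layer (assumed setter)
  network.activate();                        // forward pass shown above
  double[] result = network.getOutputs();    // read the output layer (assumed getter)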
Example #9
  private void adaptation(
      SetOfIOPairs trainingSet, int maxK, double eps, double lambda, double micro) {
    //         trainingSet  : the training set
    //         maxK         : maximum number of iterations
    //         eps          : required precision of the normalized gradient length
    //         lambda       : learning rate (0.1)
    //         micro        : momentum term
    double delta;
    Gradients deltaGradients = new Gradients(this);
    Gradients totalGradients = new Gradients(this);
    Gradients partialGradients = new Gradients(this);

    System.out.println("setting up random weights and thresholds ...");

    // set the network's thresholds and weights to random values; the delta gradients
    // are already zeroed during init
    for (int il = this.numberOfLayers() - 1;
        il >= 1;
        il--) { // iterate down over all layers except the lowest (input) one
      NeuralLayer currentLayer = this.getLayer(il);
      for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron in the layer
        Neuron currentNeuron = currentLayer.getNeuron(in);
        currentNeuron.threshold = 2 * this.random() - 1;
        // deltaGradients.setThreshold(il,in,0.0);
        for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) {
          currentNeuron.getInput(ii).weight = 2 * this.random() - 1;
          // deltaGradients.setWeight(il,in,ii,0.0);
        } // end ii
      } // end in
    } // end il

    int currK = 0; // iteration counter
    double currE = Double.POSITIVE_INFINITY; // the initial error is infinite (it should decrease)

    System.out.println("entering adaptation loop ... (maxK = " + maxK + ")");

    while (currK < maxK && currE > eps) {
      computeTotalGradient(totalGradients, partialGradients, trainingSet);
      for (int il = this.numberOfLayers() - 1;
          il >= 1;
          il--) { // iterate down over all layers except the lowest
        NeuralLayer currentLayer = this.getLayer(il);

        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron in the layer
          Neuron currentNeuron = currentLayer.getNeuron(in);
          delta =
              -lambda * totalGradients.getThreshold(il, in)
                  + micro * deltaGradients.getThreshold(il, in);
          currentNeuron.threshold += delta;
          deltaGradients.setThreshold(il, in, delta);
        } // end for in 1

        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron in the layer
          Neuron currentNeuron = currentLayer.getNeuron(in);
          for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) { // and for each input of the neuron
            delta =
                -lambda * totalGradients.getWeight(il, in, ii)
                    + micro * deltaGradients.getWeight(il, in, ii);
            currentNeuron.getInput(ii).weight += delta;
            deltaGradients.setWeight(il, in, ii, delta);
          } // end for ii
        } // end for in 2
      } // end for il

      currE = totalGradients.getGradientAbs();
      currK++;
      if (currK % 25 == 0) System.out.println("currK=" + currK + "   currE=" + currE);
    } // end while
  }
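Each parameter update inside the loop follows gradient descent with momentum,

$$\Delta p^{(k)} = -\lambda\,\frac{\partial E}{\partial p} + \mu\,\Delta p^{(k-1)},$$

where p is a threshold or a weight, lambda is the learning rate and micro the momentum term; the loop stops once the gradient norm currE drops below eps or after maxK iterations.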
Example #10
  private void computeGradient(
      Gradients gradients, Vector<Double> inputs, Vector<Double> requiredOutputs) {
    // Gradients gradients = new Gradients(this);
    activities(inputs);
    for (int il = this.numberOfLayers() - 1;
        il >= 1;
        il--) { // backpropagation through all layers except the lowest
      NeuralLayer currentLayer = this.getLayer(il);

      if (currentLayer.isLayerTop()) { // if this is the top (output) layer
        // add the threshold gradients for this layer into the corresponding vector,
        // computed neuron by neuron:
        // gradients.thresholds.add(il, new Vector<Double>());
        for (int in = 0;
            in < currentLayer.numberOfNeurons();
            in++) { // for all neurons in the layer
          Neuron currentNeuron = currentLayer.getNeuron(in);
          gradients.setThreshold(
              il,
              in,
              currentNeuron.output
                  * (1 - currentNeuron.output)
                  * (currentNeuron.output - requiredOutputs.elementAt(in)));
        } // end for each neuron

        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron
          Neuron currentNeuron = currentLayer.getNeuron(in);
          for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) { // for each neuron's input
            NeuralInput currentInput = currentNeuron.getInput(ii);
            gradients.setWeight(
                il,
                in,
                ii,
                gradients.getThreshold(il, in) * currentLayer.lowerLayer().getNeuron(ii).output);
          } // end for each input
        } // end for each neuron

      } else { // for the lower layers (the lowest layer is skipped; we go down only to layer 1)
        // compute the threshold gradients:
        // gradients.thresholds.add(il, new Vector<Double>());
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron
          double aux = 0;
          // iterate over all the neuron's axons (i.e. the synapses of the neurons in the upper layer)
          for (int ia = 0; ia < currentLayer.upperLayer().numberOfNeurons(); ia++) {
            aux +=
                gradients.getThreshold(il + 1, ia)
                    * currentLayer.upperLayer().getNeuron(ia).getInput(in).weight;
          }
          gradients.setThreshold(
              il,
              in,
              currentLayer.getNeuron(in).output * (1 - currentLayer.getNeuron(in).output) * aux);
        } // end for each neuron

        // compute the weight gradients:
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron
          Neuron currentNeuron = currentLayer.getNeuron(in);
          for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) { // for each neuron's input
            NeuralInput currentInput = currentNeuron.getInput(ii);
            gradients.setWeight(
                il,
                in,
                ii,
                gradients.getThreshold(il, in) * currentLayer.lowerLayer().getNeuron(ii).output);
          } // end for each input
        } // end for each neuron
      } // end layer IF
    } // end backpropagation for each layer
    // return gradients;
  }
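The two branches implement the usual sigmoid backpropagation recurrence. For the top layer the threshold gradient of neuron n is

$$\delta_n = o_n(1 - o_n)\,(o_n - t_n),$$

for a lower layer it is propagated down through the weights of the layer above,

$$\delta_n^{(l)} = o_n(1 - o_n) \sum_a \delta_a^{(l+1)}\, w_{an},$$

and every weight gradient is the neuron's threshold gradient times the output of the corresponding neuron in the layer below, $\partial E/\partial w_{ni} = \delta_n\, o_i^{(l-1)}$.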