예제 #1
0
  /**
   * Accumulates into {@code totalGradients} the sum of the per-pair gradients over the whole
   * training set. {@code partialGradients} is a caller-supplied scratch buffer that is
   * overwritten for each pair, avoiding a reallocation per pair.
   *
   * @param totalGradients accumulator; reset at the start, holds the summed gradient on return
   * @param partialGradients scratch gradient storage, overwritten by each computeGradient call
   * @param trainingSet the set of input/output pairs to accumulate over
   */
  private void computeTotalGradient(
      Gradients totalGradients, Gradients partialGradients, SetOfIOPairs trainingSet) {
    totalGradients.resetGradients();

    for (SetOfIOPairs.IOPair pair : trainingSet.pairs) { // for every training pair
      computeGradient(partialGradients, pair.inputs, pair.outputs);
      // Fold this pair's gradient into the running total, layer by layer. Layer 0 is the
      // input layer and carries no thresholds/weights, hence il >= 1.
      for (int il = this.numberOfLayers() - 1; il >= 1; il--) {
        NeuralLayer currentLayer = this.getLayer(il);
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) {
          // accumulate the threshold (bias) gradient of this neuron
          totalGradients.incrementThreshold(il, in, partialGradients.getThreshold(il, in));
          for (int ii = 0; ii < currentLayer.lowerLayer().numberOfNeurons(); ii++) {
            // accumulate the weight gradient of each incoming connection
            totalGradients.incrementWeight(il, in, ii, partialGradients.getWeight(il, in, ii));
          }
        }
      } // end for layer
    } // end for pair
  }
예제 #2
0
 /**
  * Fully connects two layers: every neuron of {@code connectFrom} is added as an input
  * connection (with an initial weight of 0.0) to every neuron of {@code connectTo}.
  *
  * @param connectFrom the layer supplying outputs
  * @param connectTo the layer receiving the new input connections
  */
 public void connectLayers(NeuralLayer connectFrom, NeuralLayer connectTo) {
   for (Neuron target : connectTo.getNeurons()) {
     for (Neuron source : connectFrom.getNeurons()) {
       target.addInputConnection(source, 0.0);
     }
   }
 }
예제 #3
0
 /**
  * Creates one bias neuron for every hidden layer and one for the output layer, assigning each
  * a consecutive ID starting at {@code startIndex}, and registers each in the neuron-ID map.
  *
  * @param startIndex first free neuron ID to use for the bias neurons
  * @throws DuplicateNeuronID_Exception if a generated ID is already mapped
  */
 private void setBiasNeurons(int startIndex) throws DuplicateNeuronID_Exception {
   int nextId = startIndex;
   for (NeuralLayer hidden : hiddenLayers) {
     InputNeuron bias = new InputNeuron(nextId++);
     hidden.setBiasNeuron(bias);
     mapNeuronToID(bias);
   }
   InputNeuron outputBias = new InputNeuron(nextId);
   outputLayer.setBiasNeuron(outputBias);
   mapNeuronToID(outputBias);
 }
예제 #4
0
 /**
  * Rebuilds the neuron-ID lookup map from scratch for every neuron in the input, hidden and
  * output layers.
  *
  * <p>NOTE(review): bias neurons registered by setBiasNeurons are not re-registered here —
  * confirm that getNeurons() includes them, or that dropping them from the map is intended.
  *
  * @throws DuplicateNeuronID_Exception if two neurons share the same ID
  */
 private void updateAllNeuronsMap() throws DuplicateNeuronID_Exception {
   allNeurons.clear();
   registerLayerNeurons(inputLayer);
   for (NeuralLayer hidden : hiddenLayers) {
     registerLayerNeurons(hidden);
   }
   registerLayerNeurons(outputLayer);
 }

 /** Registers every neuron of the given layer in the neuron-ID map. */
 private void registerLayerNeurons(NeuralLayer layer) throws DuplicateNeuronID_Exception {
   for (Neuron neuron : layer.getNeurons()) {
     mapNeuronToID(neuron);
   }
 }
예제 #5
0
 /**
  * Refreshes the network: rebuilds the neuron-ID map, then calls {@code update()} on every
  * neuron of every layer (input, hidden, output — in that order).
  *
  * @throws DuplicateNeuronID_Exception if two neurons share the same ID
  */
 public void update() throws DuplicateNeuronID_Exception {
   updateAllNeuronsMap();
   List<NeuralLayer> allLayers = new ArrayList<NeuralLayer>();
   allLayers.add(inputLayer);
   allLayers.addAll(hiddenLayers);
   allLayers.add(outputLayer);
   for (NeuralLayer layer : allLayers) {
     for (Neuron neuron : layer.getNeurons()) {
       neuron.update();
     }
   }
 }
예제 #6
0
  /**
   * Returns the current output values of the network, one entry per output-layer neuron, in
   * layer order.
   *
   * @return a freshly allocated list of the output neurons' values
   */
  public List<Double> getOutputs() {
    List<Double> values = new ArrayList<Double>();
    for (Neuron outputNeuron : outputLayer.getNeurons()) {
      values.add(outputNeuron.getOutputValue());
    }
    return values;
  }
예제 #7
0
  /**
   * Builds a fully connected feed-forward network with the given layer sizes. Neuron IDs are
   * assigned sequentially across input, hidden and output neurons; bias neurons receive the IDs
   * that follow.
   *
   * @param inputLayerSize number of input neurons
   * @param outputLayerSize number of output (sigmoid) neurons
   * @param numberOfhidden number of hidden layers
   * @param hiddenLayerSizes number of neurons in each hidden layer (shared by all hidden layers)
   */
  public NeuralNetwork(
      int inputLayerSize, int outputLayerSize, int numberOfhidden, int hiddenLayerSizes) {
    try {
      int count = 0; // next free neuron ID
      inputLayer = new NeuralLayer(0); // layer index 0 = input layer
      for (int i = 0; i < inputLayerSize; i++) {
        InputNeuron neuron = new InputNeuron(count);
        inputLayer.getNeurons().add(neuron);
        mapNeuronToID(neuron);
        count++;
      }

      hiddenLayers = new ArrayList<NeuralLayer>();
      for (int i = 0; i < numberOfhidden; i++) {
        NeuralLayer layer = new NeuralLayer(1 + i); // hidden layers take indices 1..numberOfhidden
        for (int j = 0; j < hiddenLayerSizes; j++) {
          SigmoidNeuron neuron = new SigmoidNeuron(count);
          layer.getNeurons().add(neuron);
          mapNeuronToID(neuron);
          count++;
        }
        hiddenLayers.add(layer);
      }
      // NOTE(review): the output layer reuses index numberOfhidden, which equals the last hidden
      // layer's index (1 + (numberOfhidden - 1)) — confirm whether NeuralLayer's index matters,
      // or whether numberOfhidden + 1 was intended.
      outputLayer = new NeuralLayer(numberOfhidden);
      for (int i = 0; i < outputLayerSize; i++) {
        SigmoidNeuron neuron = new SigmoidNeuron(count);
        outputLayer.getNeurons().add(neuron);
        mapNeuronToID(neuron);
        count++;
      }

      setBiasNeurons(count); // bias neurons get the IDs after all regular neurons
      if (hiddenLayers.size() == 0) {
        connectLayers(inputLayer, outputLayer); // no hidden layers: wire input directly to output
      } else {
        connectAllLayers(null);
      }
      updateAllNeuronsMap();
    } catch (DuplicateNeuronID_Exception e) {
      // NOTE(review): the exception is swallowed here, leaving a partially built network —
      // consider rethrowing as an unchecked exception instead.
      e.printStackTrace();
    }
  }
예제 #8
0
  /**
   * Wraps already-constructed layers into a network: registers every neuron's ID, connects all
   * layers, and refreshes the neuron state.
   *
   * @param in input layer
   * @param hidden hidden layers (may be empty)
   * @param out output layer
   * @throws DuplicateNeuronID_Exception if two neurons share the same ID
   */
  public NeuralNetwork(NeuralLayer in, List<NeuralLayer> hidden, NeuralLayer out)
      throws DuplicateNeuronID_Exception {
    inputLayer = in;
    outputLayer = out;
    hiddenLayers = hidden;

    // Register every neuron ID up front so duplicates are detected before wiring.
    // NOTE(review): update() below rebuilds the map anyway via updateAllNeuronsMap(), so these
    // loops mainly provide early duplicate detection — confirm before simplifying.
    for (Neuron neuron : inputLayer.getNeurons()) {
      mapNeuronToID(neuron);
    }
    for (NeuralLayer layer : hiddenLayers) {
      for (Neuron neuron : layer.getNeurons()) {
        mapNeuronToID(neuron);
      }
    }
    for (Neuron neuron : outputLayer.getNeurons()) {
      mapNeuronToID(neuron);
    }
    connectAllLayers(null);
    update();
  }
  /**
   * Builds a PMML 4.0 document describing a single-hidden-layer feed-forward regression network
   * with logistic hidden units and identity output units. Input and output fields are mapped
   * through NormContinuous with two LinearNorm anchor points (orig=0 -> -mean/std and
   * orig=mean -> 0, i.e. z-score style standardization).
   *
   * <p>Neuron IDs are assigned sequentially: neural inputs first, then hidden neurons, then
   * output neurons. Weights are consumed from {@code weights} in order, per neuron as
   * [bias, incoming weights...], hidden layer first, then the output layer — so the array must
   * hold hiddenSize * (inputs + 1) + outputs * (hiddenSize + 1) entries.
   *
   * @param modelName name stored on the PMML NeuralNetwork model
   * @param inputfieldNames names of the active (input) mining fields
   * @param outputfieldNames names of the predicted (output) mining fields
   * @param inputMeans per-input means used for normalization
   * @param inputStds per-input standard deviations used for normalization
   * @param outputMeans per-output means used for denormalization
   * @param outputStds per-output standard deviations used for denormalization
   * @param hiddenSize number of hidden neurons
   * @param weights flattened weight vector, consumed as described above
   * @return the assembled PMML document
   */
  public static PMML generateSimpleNeuralNetwork(
      String modelName,
      String[] inputfieldNames,
      String[] outputfieldNames,
      double[] inputMeans,
      double[] inputStds,
      double[] outputMeans,
      double[] outputStds,
      int hiddenSize,
      double[] weights) {

    int counter = 0; // next neuron/input ID, shared across inputs, hidden and output neurons
    int wtsIndex = 0; // read cursor into the flattened weights array
    PMML pmml = new PMML();
    pmml.setVersion("4.0");

    // --- Header: application info, copyright, description, timestamp ---
    Header header = new Header();
    Application app = new Application();
    app.setName("Drools PMML Generator");
    app.setVersion("0.01 Alpha");
    header.setApplication(app);

    header.setCopyright("BSD");

    header.setDescription(" Smart Vent Model ");

    Timestamp ts = new Timestamp();
    ts.getContent().add(new java.util.Date().toString());
    header.setTimestamp(ts);

    pmml.setHeader(header);

    // --- Data dictionary: one continuous double field per input and output name ---
    DataDictionary dic = new DataDictionary();
    dic.setNumberOfFields(BigInteger.valueOf(inputfieldNames.length + outputfieldNames.length));
    for (String ifld : inputfieldNames) {
      DataField dataField = new DataField();
      dataField.setName(ifld);
      dataField.setDataType(DATATYPE.DOUBLE);
      dataField.setDisplayName(ifld);
      dataField.setOptype(OPTYPE.CONTINUOUS);
      dic.getDataFields().add(dataField);
    }
    for (String ofld : outputfieldNames) {
      DataField dataField = new DataField();
      dataField.setName(ofld);
      dataField.setDataType(DATATYPE.DOUBLE);
      dataField.setDisplayName(ofld);
      dataField.setOptype(OPTYPE.CONTINUOUS);
      dic.getDataFields().add(dataField);
    }

    pmml.setDataDictionary(dic);

    // --- NeuralNetwork model: logistic activation, regression, no output normalization ---
    NeuralNetwork nnet = new NeuralNetwork();
    nnet.setActivationFunction(ACTIVATIONFUNCTION.LOGISTIC);
    nnet.setFunctionName(MININGFUNCTION.REGRESSION);
    nnet.setNormalizationMethod(NNNORMALIZATIONMETHOD.NONE);
    nnet.setModelName(modelName);

    // Mining schema: inputs are ACTIVE, outputs are PREDICTED.
    MiningSchema miningSchema = new MiningSchema();
    for (String ifld : inputfieldNames) {
      MiningField mfld = new MiningField();
      mfld.setName(ifld);
      mfld.setOptype(OPTYPE.CONTINUOUS);
      mfld.setUsageType(FIELDUSAGETYPE.ACTIVE);
      miningSchema.getMiningFields().add(mfld);
    }
    for (String ofld : outputfieldNames) {
      MiningField mfld = new MiningField();
      mfld.setName(ofld);
      mfld.setOptype(OPTYPE.CONTINUOUS);
      mfld.setUsageType(FIELDUSAGETYPE.PREDICTED);
      miningSchema.getMiningFields().add(mfld);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(miningSchema);

    // Output fields: each predicted field gets an "Out_<name>" output column.
    Output outputs = new Output();
    for (String ofld : outputfieldNames) {
      OutputField outFld = new OutputField();
      outFld.setName("Out_" + ofld);
      outFld.setTargetField(ofld);
      outputs.getOutputFields().add(outFld);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(outputs);

    // --- Neural inputs: one per input field, normalized via two LinearNorm anchor points ---
    NeuralInputs nins = new NeuralInputs();
    nins.setNumberOfInputs(BigInteger.valueOf(inputfieldNames.length));

    for (int j = 0; j < inputfieldNames.length; j++) {
      String ifld = inputfieldNames[j];
      NeuralInput nin = new NeuralInput();
      nin.setId("" + counter++);
      DerivedField der = new DerivedField();
      der.setDataType(DATATYPE.DOUBLE);
      der.setOptype(OPTYPE.CONTINUOUS);
      NormContinuous nc = new NormContinuous();
      nc.setField(ifld);
      nc.setOutliers(OUTLIERTREATMENTMETHOD.AS_IS);
      // Anchor 1: raw 0 maps to -mean/std; anchor 2: raw mean maps to 0 (z-score line).
      LinearNorm lin1 = new LinearNorm();
      lin1.setOrig(0);
      lin1.setNorm(-inputMeans[j] / inputStds[j]);
      nc.getLinearNorms().add(lin1);
      LinearNorm lin2 = new LinearNorm();
      lin2.setOrig(inputMeans[j]);
      lin2.setNorm(0);
      nc.getLinearNorms().add(lin2);
      der.setNormContinuous(nc);
      nin.setDerivedField(der);
      nins.getNeuralInputs().add(nin);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(nins);

    // --- Hidden layer: each neuron consumes [bias, one weight per input] from weights ---
    NeuralLayer hidden = new NeuralLayer();
    hidden.setNumberOfNeurons(BigInteger.valueOf(hiddenSize));

    for (int j = 0; j < hiddenSize; j++) {
      Neuron n = new Neuron();
      n.setId("" + counter++);
      n.setBias(weights[wtsIndex++]);
      for (int k = 0; k < inputfieldNames.length; k++) {
        Synapse con = new Synapse();
        con.setFrom("" + k); // neural-input IDs are 0..inputs-1
        con.setWeight(weights[wtsIndex++]);
        n.getCons().add(con);
      }
      hidden.getNeurons().add(n);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(hidden);

    // --- Output layer: identity activation; synapses come from the hidden neurons ---
    NeuralLayer outer = new NeuralLayer();
    outer.setActivationFunction(ACTIVATIONFUNCTION.IDENTITY);
    outer.setNumberOfNeurons(BigInteger.valueOf(outputfieldNames.length));

    for (int j = 0; j < outputfieldNames.length; j++) {
      Neuron n = new Neuron();
      n.setId("" + counter++);
      n.setBias(weights[wtsIndex++]);
      for (int k = 0; k < hiddenSize; k++) {
        Synapse con = new Synapse();
        con.setFrom("" + (k + inputfieldNames.length)); // hidden IDs start after the inputs
        con.setWeight(weights[wtsIndex++]);
        n.getCons().add(con);
      }
      outer.getNeurons().add(n);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(outer);

    // --- Neural outputs: denormalize each output neuron back to the raw field scale ---
    NeuralOutputs finalOuts = new NeuralOutputs();
    finalOuts.setNumberOfOutputs(BigInteger.valueOf(outputfieldNames.length));
    for (int j = 0; j < outputfieldNames.length; j++) {
      NeuralOutput output = new NeuralOutput();
      output.setOutputNeuron("" + (j + inputfieldNames.length + hiddenSize));
      DerivedField der = new DerivedField();
      der.setDataType(DATATYPE.DOUBLE);
      der.setOptype(OPTYPE.CONTINUOUS);
      NormContinuous nc = new NormContinuous();
      nc.setField(outputfieldNames[j]);
      nc.setOutliers(OUTLIERTREATMENTMETHOD.AS_IS);
      // Same two-point z-score mapping as the inputs, using the output statistics.
      LinearNorm lin1 = new LinearNorm();
      lin1.setOrig(0);
      lin1.setNorm(-outputMeans[j] / outputStds[j]);
      nc.getLinearNorms().add(lin1);
      LinearNorm lin2 = new LinearNorm();
      lin2.setOrig(outputMeans[j]);
      lin2.setNorm(0);
      nc.getLinearNorms().add(lin2);
      der.setNormContinuous(nc);
      output.setDerivedField(der);
      finalOuts.getNeuralOutputs().add(output);
    }

    nnet.getExtensionsAndNeuralLayersAndNeuralInputs().add(finalOuts);

    pmml.getAssociationModelsAndBaselineModelsAndClusteringModels().add(nnet);

    return pmml;
  }
예제 #10
0
  /**
   * Trains the network with batch gradient descent plus a momentum term.
   *
   * @param trainingSet training set of input/output pairs
   * @param maxK maximum number of iterations
   * @param eps required precision of the normalized gradient length (stopping criterion)
   * @param lambda learning rate (e.g. 0.1)
   * @param micro momentum coefficient
   */
  private void adaptation(
      SetOfIOPairs trainingSet, int maxK, double eps, double lambda, double micro) {
    double delta;
    Gradients deltaGradients = new Gradients(this);
    Gradients totalGradients = new Gradients(this);
    Gradients partialGradients = new Gradients(this);

    System.out.println("setting up random weights and thresholds ...");

    // Initialize all thresholds and weights to random values in (-1, 1); the delta gradients
    // are already zeroed by the Gradients constructor.
    for (int il = this.numberOfLayers() - 1;
        il >= 1;
        il--) { // iterate over all layers except the input layer
      NeuralLayer currentLayer = this.getLayer(il);
      for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron on the layer
        Neuron currentNeuron = currentLayer.getNeuron(in);
        currentNeuron.threshold = 2 * this.random() - 1;
        for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) {
          currentNeuron.getInput(ii).weight = 2 * this.random() - 1;
        } // end ii
      } // end in
    } // end il

    int currK = 0; // iteration counter
    double currE =
        Double.POSITIVE_INFINITY; // current error starts at infinity (expected to decrease
    // over the iterations)

    System.out.println("entering adaptation loop ... (maxK = " + maxK + ")");

    while (currK < maxK && currE > eps) {
      computeTotalGradient(totalGradients, partialGradients, trainingSet);
      for (int il = this.numberOfLayers() - 1;
          il >= 1;
          il--) { // iterate over all layers except the input layer
        NeuralLayer currentLayer = this.getLayer(il);

        // Threshold update: gradient step plus momentum from the previous iteration's delta.
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron
          Neuron currentNeuron = currentLayer.getNeuron(in);
          delta =
              -lambda * totalGradients.getThreshold(il, in)
                  + micro * deltaGradients.getThreshold(il, in);
          currentNeuron.threshold += delta;
          deltaGradients.setThreshold(il, in, delta);
        } // end for in 1

        // Weight update: same rule applied to every input connection of every neuron.
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) { // for each neuron
          Neuron currentNeuron = currentLayer.getNeuron(in);
          for (int ii = 0; ii < currentNeuron.numberOfInputs(); ii++) { // for each neuron input
            delta =
                -lambda * totalGradients.getWeight(il, in, ii)
                    + micro * deltaGradients.getWeight(il, in, ii);
            currentNeuron.getInput(ii).weight += delta;
            deltaGradients.setWeight(il, in, ii, delta);
          } // end for ii
        } // end for in 2
      } // end for il

      currE = totalGradients.getGradientAbs();
      currK++;
      if (currK % 25 == 0) System.out.println("currK=" + currK + "   currE=" + currE);
    } // end while
  }
예제 #11
0
  /**
   * Computes, by backpropagation, the error gradient for a single training pair and stores it
   * into {@code gradients} (thresholds and weights, indexed by layer/neuron/input).
   *
   * @param gradients output buffer; overwritten with this pair's gradient
   * @param inputs network input vector of the pair
   * @param requiredOutputs desired output vector of the pair
   */
  private void computeGradient(
      Gradients gradients, Vector<Double> inputs, Vector<Double> requiredOutputs) {
    // Forward pass: compute neuron activations for this input.
    activities(inputs);
    // Backward pass over all layers except the input layer (il >= 1).
    for (int il = this.numberOfLayers() - 1; il >= 1; il--) {
      NeuralLayer currentLayer = this.getLayer(il);

      if (currentLayer.isLayerTop()) { // output layer
        // delta = y * (1 - y) * (y - t): the sigmoid error derivative w.r.t. the threshold.
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) {
          Neuron currentNeuron = currentLayer.getNeuron(in);
          gradients.setThreshold(
              il,
              in,
              currentNeuron.output
                  * (1 - currentNeuron.output)
                  * (currentNeuron.output - requiredOutputs.elementAt(in)));
        } // end for each neuron

        // Weight gradient: the neuron's delta times the lower-layer activation feeding it.
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) {
          for (int ii = 0; ii < currentLayer.getNeuron(in).numberOfInputs(); ii++) {
            gradients.setWeight(
                il,
                in,
                ii,
                gradients.getThreshold(il, in) * currentLayer.lowerLayer().getNeuron(ii).output);
          } // end for each input
        } // end for each neuron

      } else { // hidden layers (the input layer itself is never processed)
        // delta = y * (1 - y) * sum over upper-layer neurons of (their delta * connecting weight).
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) {
          double aux = 0;
          for (int ia = 0; ia < currentLayer.upperLayer().numberOfNeurons(); ia++) {
            aux +=
                gradients.getThreshold(il + 1, ia)
                    * currentLayer.upperLayer().getNeuron(ia).getInput(in).weight;
          }
          gradients.setThreshold(
              il,
              in,
              currentLayer.getNeuron(in).output * (1 - currentLayer.getNeuron(in).output) * aux);
        } // end for each neuron

        // Weight gradient, same form as in the output layer.
        for (int in = 0; in < currentLayer.numberOfNeurons(); in++) {
          for (int ii = 0; ii < currentLayer.getNeuron(in).numberOfInputs(); ii++) {
            gradients.setWeight(
                il,
                in,
                ii,
                gradients.getThreshold(il, in) * currentLayer.lowerLayer().getNeuron(ii).output);
          } // end for each input
        } // end for each neuron
      } // end layer IF
    } // end backpropagation over layers
  }
예제 #12
0
 /**
  * Assigns the given constant as the output value of every bias neuron (one per hidden layer
  * plus the output layer's). The method name is kept for callers, though "setBiasInputs" was
  * presumably intended.
  *
  * @param value output value to assign to each bias neuron
  */
 public void setBiaInputs(double value) {
   for (NeuralLayer hidden : hiddenLayers) {
     hidden.getBiasNeuron().setOutputValue(value);
   }
   outputLayer.getBiasNeuron().setOutputValue(value);
 }