Beispiel #1
0
  /**
   * Find the most likely person based on a detection. Returns the index, and stores the confidence
   * value into pConfidence.
   *
   * @param projectedTestFace the projected test face
   * @param pConfidencePointer a pointer containing the confidence value
   * @param iTestFace the test face index
   * @return the index
   */
  /**
   * Finds the training face whose PCA projection is closest (smallest squared Euclidean distance)
   * to the given projected test face, and writes a confidence value into {@code
   * pConfidencePointer}.
   *
   * @param projectedTestFace the test face projected onto the PCA subspace (length {@code nEigens})
   * @param pConfidencePointer out-parameter receiving a confidence in roughly [0, 1]; similar
   *     images should score about 0.5-1.0, very different images about 0.0-0.5
   * @return the index of the nearest training face
   */
  private int findNearestNeighbor(float[] projectedTestFace, FloatPointer pConfidencePointer) {
    double leastDistSq = Double.MAX_VALUE;
    int iNearest = 0;

    LOGGER.info("................");
    LOGGER.info("find nearest neighbor from " + nTrainFaces + " training faces");
    for (int iTrain = 0; iTrain < nTrainFaces; iTrain++) {
      double distSq = 0;

      for (int i = 0; i < nEigens; i++) {
        float projectedTrainFaceDistance = (float) projectedTrainFaceMat.get(iTrain, i);
        float d_i = projectedTestFace[i] - projectedTrainFaceDistance;
        // Squared Euclidean distance. Dividing each term by eigenValMat.data_fl().get(i)
        // here would yield the Mahalanobis distance instead, which might give better
        // results than the Euclidean distance.
        distSq += d_i * d_i;
      }

      if (distSq < leastDistSq) {
        leastDistSq = distSq;
        iNearest = iTrain;
        LOGGER.info(
            "  training face "
                + (iTrain + 1)
                + " is the new best match, least squared distance: "
                + leastDistSq);
      }
    }

    // Derive the confidence level from the Euclidean distance, scaled so that
    // similar images should give a confidence between 0.5 and 1.0, and very
    // different images a confidence between 0.0 and 0.5.
    float pConfidence =
        (float) (1.0f - Math.sqrt(leastDistSq / (float) (nTrainFaces * nEigens)) / 255.0f);
    pConfidencePointer.put(pConfidence);

    LOGGER.info(
        "training face " + (iNearest + 1) + " is the final best match, confidence " + pConfidence);
    return iNearest;
  }
  /**
   * Recognizes each face image listed in the given index file against the saved training data,
   * printing the nearest match, ground truth, and confidence per image, plus overall accuracy and
   * average recognition time.
   *
   * @param szFileTest path to the index file listing the test images and their ground-truth person
   *     numbers
   */
  public void recognizeFileList(final String szFileTest) {
    System.out.println("===========================================");
    System.out.println("recognizing faces indexed from " + szFileTest);
    int nCorrect = 0;
    int nWrong = 0;
    float confidence = 0.0f;

    // load test images and ground truth for person number
    testFaceImgArr = loadFaceImgArray(szFileTest);
    final int nTestFaces = testFaceImgArr.length;

    System.out.println(nTestFaces + " test faces loaded");

    // load the saved training data; bail out if it is missing
    final CvMat trainPersonNumMat = loadTrainingData();
    if (trainPersonNumMat == null) {
      return;
    }

    // project the test images onto the PCA subspace
    final float[] projectedTestFace = new float[nEigens];
    final double timeFaceRecognizeStart = (double) cvGetTickCount(); // Record the timing.

    for (int i = 0; i < nTestFaces; i++) {
      // project the test image onto the PCA subspace
      cvEigenDecomposite(
          testFaceImgArr[i], // obj
          nEigens, // nEigObjs
          eigenVectArr, // eigInput (Pointer)
          0, // ioFlags
          null, // userData
          pAvgTrainImg, // avg
          projectedTestFace); // coeffs

      // Out-parameter for the match confidence; passed directly (the previous
      // extra FloatPointer wrapper around it was redundant).
      final FloatPointer pConfidence = new FloatPointer(confidence);
      final int iNearest = findNearestNeighbor(projectedTestFace, pConfidence);
      confidence = pConfidence.get();
      final int truth = personNumTruthMat.data_i().get(i);
      final int nearest = trainPersonNumMat.data_i().get(iNearest);

      final String answer;
      if (nearest == truth) {
        answer = "Correct";
        nCorrect++;
      } else {
        answer = "WRONG!";
        nWrong++;
      }
      System.out.println(
          "nearest = "
              + nearest
              + ", Truth = "
              + truth
              + " ("
              + answer
              + "). Confidence = "
              + confidence);
    }
    final double tallyFaceRecognizeTime = (double) cvGetTickCount() - timeFaceRecognizeStart;
    if (nCorrect + nWrong > 0) {
      System.out.println(
          "TOTAL ACCURACY: "
              + (nCorrect * 100 / (nCorrect + nWrong))
              + "% out of "
              + (nCorrect + nWrong)
              + " tests.");
      System.out.println(
          "TOTAL TIME: "
              + (tallyFaceRecognizeTime / (cvGetTickFrequency() * 1000.0 * (nCorrect + nWrong)))
              + " ms average.");
    }
  }
  /**
   * Loads the grayscale face images listed in the given index file.
   *
   * <p>Each line has the form {@code "<personNumber> <personName> <imageFilename>"}. Side effects:
   * fills {@code personNumTruthMat} with the ground-truth person number of each face, repopulates
   * {@code personNames}, and updates {@code nPersons}.
   *
   * @param filename path to the image index file
   * @return the loaded face images, one per non-empty line of the index file
   * @throws RuntimeException if the index file cannot be read or an image fails to load
   */
  private IplImage[] loadFaceImgArray(String filename) {
    IplImage[] faceImgArr;
    int nFaces = 0;

    try {
      // First pass: count the entries so the arrays can be sized up front.
      // (try-with-resources closes the reader; the original leaked it.)
      try (BufferedReader countFile = new BufferedReader(new FileReader(filename))) {
        String line;
        while ((line = countFile.readLine()) != null && !line.isEmpty()) {
          nFaces++;
        }
      }
      System.out.println("loadingImgArray-> liczba plikow: " + nFaces);

      faceImgArr = new IplImage[nFaces];
      personNumTruthMat =
          cvCreateMat(
              1, // rows
              nFaces, // cols
              CV_32SC1 // type, 32-bit signed, 1 channel
              );
      // initialize for debugging
      for (int j1 = 0; j1 < nFaces; j1++) {
        personNumTruthMat.put(0, j1, 0);
      }
      personNames.clear();
      nPersons = 0;

      // Second pass: parse each entry and load its image.
      try (BufferedReader imgListFile = new BufferedReader(new FileReader(filename))) {
        for (int iFace = 0; iFace < nFaces; iFace++) {
          final String line = imgListFile.readLine();
          // Null-check first: the file may have shrunk between the two passes.
          if (line == null || line.isEmpty()) {
            break;
          }
          final String[] tokens = line.split(" ");
          final int personNumber = Integer.parseInt(tokens[0]);
          final String personName = tokens[1];
          final String imgFilename = tokens[2];
          System.out.println(
              "Got: " + iFace + " " + personNumber + " " + personName + " " + imgFilename);
          // check if a new person is being loaded
          if (personNumber > nPersons) {
            personNames.add(personName);
            nPersons = personNumber;
            System.out.println(
                "Added new person"
                    + personName
                    + " ->nPersons = "
                    + nPersons
                    + " ["
                    + personNames.size()
                    + "] ");
          }

          // keep the ground-truth person number for this face
          personNumTruthMat.put(
              0, // i
              iFace, // j
              personNumber); // v
          // load the face image
          faceImgArr[iFace] =
              cvLoadImage(
                  imgFilename, // filename
                  CV_LOAD_IMAGE_GRAYSCALE // is color
                  );

          if (faceImgArr[iFace] == null) {
            throw new RuntimeException("Can't load image from " + imgFilename);
          }
        }
      }
    } catch (IOException ex) {
      throw new RuntimeException(ex);
    }
    // Fixed message: the original had a missing opening quote and space.
    System.out.println(
        "Data loaded from '" + filename + "': (" + nFaces + " images of " + nPersons + " people).");

    return faceImgArr;
  }
  // PCA wymaga poprawek ale zeby to sprawdzic musza byc wszystkie metody zrobione
  // PCA to glowna metoda algorytmu eigenface. To ona 'rozbija' twarz na wektory
  // i normalizuje dzieki czemu mozna je potem porownywac
  // i szukac najmniejszego 'distance'
  // PCA is the core of the eigenface algorithm: it decomposes each training face
  // into a set of eigenvectors and normalizes the eigenvalues so faces can later
  // be compared by searching for the smallest distance in the projected space.
  // (PCA needs fixes, but all the other methods must be finished first to verify that.)
  private void doPCA() {
    // Use one fewer eigenvector than there are training faces.
    nEigens = nTrainFaces - 1;

    // All eigenvector images share the dimensions of the first training image.
    final CvSize imageSize = new CvSize();
    imageSize.width(trainingFaceImgArr[0].width());
    imageSize.height(trainingFaceImgArr[0].height());

    // Allocate one 32-bit float, single-channel image per eigenvector.
    eigenVectArr = new IplImage[nEigens];
    for (int idx = 0; idx < nEigens; idx++) {
      eigenVectArr[idx] = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);
    }

    // Row vector of eigenvalues: 1 x nEigens, 32-bit float, single channel.
    eigenValMat = cvCreateMat(1, nEigens, CV_32FC1);

    // Averaged training image, same size and depth as the eigenvectors.
    pAvgTrainImg = cvCreateImage(imageSize, IPL_DEPTH_32F, 1);

    // PCA termination criterion: stop after nEigens iterations (epsilon = 1).
    final CvTermCriteria calcLimit = cvTermCriteria(CV_TERMCRIT_ITER, nEigens, 1);

    // Compute the average image, eigenvalues, and eigenvectors.
    // NOTE(review): this call was flagged as problematic in the original code.
    cvCalcEigenObjects(
        nTrainFaces, // nObjects
        trainingFaceImgArr, // input
        eigenVectArr, // output
        CV_EIGOBJ_NO_CALLBACK, // ioFlags
        0, // ioBufSize
        null, // userData
        calcLimit, // calcLimit
        pAvgTrainImg, // avg
        eigenValMat.data_fl()); // eigVals

    // Normalize the eigenvalues in place (L1 norm, range [0, 1]).
    cvNormalize(
        eigenValMat, // src (CvArr)
        eigenValMat, // dst (CvArr)
        1, // a
        0, // b
        CV_L1, // norm_type
        null); // mask

    System.out.println("PCA done.");
  }
Beispiel #5
0
  /* From a normalized image file to recognized if match current trained data.
   */
  /**
   * Recognizes every normalized face image found in a directory against the trained data for the
   * given person, logging the nearest match, ground truth, and confidence for each image.
   *
   * @param tmpDetectImageOutput directory containing the normalized face images to test
   * @param trainedOutput directory containing the stored training data
   * @param personName name of the person whose training data should be loaded
   */
  public void recognize(String tmpDetectImageOutput, String trainedOutput, String personName) {

    File folder = new File(tmpDetectImageOutput);
    File[] listOfFiles = folder.listFiles();
    ArrayList<String> testFaceFileNames = new ArrayList<String>();

    // listFiles() returns null when the path is not a readable directory.
    if (listOfFiles == null) {
      LOGGER.info("no test images found in " + tmpDetectImageOutput);
      return;
    }

    String answer = "";
    // load each readable grayscale image into the testFaces list
    for (int i = 0; i < listOfFiles.length; i++) {
      if (listOfFiles[i].isFile()) {
        String file = listOfFiles[i].getName();
        String filepath = tmpDetectImageOutput + "/" + file;
        IplImage tmpImage = cvLoadImage(filepath, CV_LOAD_IMAGE_GRAYSCALE);
        if (tmpImage != null) {
          testFaces.add(tmpImage);
          testFaceFileNames.add(filepath);
        }
      }
    }

    CvMat trainPersonNumMat = loadTrainingData(trainedOutput, personName);

    int nTestFaces = testFaces.size();
    LOGGER.info(trainedOutput + "/" + personName + ".xml");
    System.out.println("total: " + nTestFaces + " to be tested latter...");

    personNumTruthMat = cvCreateMat(1, nTestFaces, CV_32SC1); // 32-bit signed, 1 channel

    float[] projectedTestFace = new float[nEigens];
    float confidence = 0.0f;
    int nCorrect = 0;
    int nWrong = 0;
    double timeFaceRecognizeStart = (double) cvGetTickCount(); // supposedly to record the timing??
    for (int i = 0; i < nTestFaces; i++) {
      int iNearest;
      int nearest;
      int truth;

      // project the test image onto the PCA subspace
      LOGGER.info("before find decomposite..");
      cvEigenDecomposite(
          testFaces.get(i),
          nEigens, // nEigObjs
          eigenVectArr, // eigInput (Pointer)
          0, // ioFlags
          null, // userData
          pAvgTrainImg, // avg
          projectedTestFace); // coeffs

      // Out-parameter for the match confidence; passed directly (the previous
      // extra FloatPointer wrapper around it was redundant).
      final FloatPointer pConfidence = new FloatPointer(confidence);
      LOGGER.info("before find nearest...");
      iNearest = findNearestNeighbor(projectedTestFace, pConfidence);
      confidence = pConfidence.get();
      truth = personNumTruthMat.data_i().get(i);
      nearest = trainPersonNumMat.data_i().get(iNearest);

      if (nearest == truth) {
        answer = "Correct";
        nCorrect++;
      } else {
        answer = "WRONG!";
        nWrong++;
      }
      LOGGER.info(testFaceFileNames.get(i));
      LOGGER.info(
          "nearest = "
              + nearest
              + ", Truth = "
              + truth
              + " ("
              + answer
              + "). Confidence = "
              + confidence);
    }
  }
Beispiel #6
0
  /**
   * Trains the eigenface model from all readable grayscale images in the given directory.
   *
   * <p>Performs Principal Component Analysis on the loaded faces: computes the average image, the
   * eigenvectors and eigenvalues, normalizes the eigenvalues, and projects every training image
   * onto the PCA subspace into {@code projectedTrainFaceMat}. Side effects: populates {@code
   * trainingFaces}, {@code nTrainFaces}, {@code nEigens}, {@code eigens}, {@code eigenVectArr},
   * {@code eigenValMat}, {@code pAvgTrainImg}, {@code personNumTruthMat}, and {@code
   * projectedTrainFaceMat}.
   *
   * @param tmpImageDataOutput directory containing the normalized training images
   * @throws IllegalStateException if fewer than two training images are available (PCA requires
   *     nTrainFaces - 1 >= 1 eigenvectors)
   */
  public void learn(String tmpImageDataOutput) {

    File folder = new File(tmpImageDataOutput);
    File[] listOfFiles = folder.listFiles();
    // listFiles() returns null when the path is not a readable directory.
    if (listOfFiles == null) {
      throw new IllegalStateException("cannot list training images in " + tmpImageDataOutput);
    }
    // load each readable grayscale image into trainingFaces
    for (int i = 0; i < listOfFiles.length; i++) {
      if (listOfFiles[i].isFile()) {
        String file = listOfFiles[i].getName();
        String filepath = tmpImageDataOutput + "/" + file;
        IplImage tmpImage = cvLoadImage(filepath, CV_LOAD_IMAGE_GRAYSCALE);
        if (tmpImage != null) {
          trainingFaces.add(tmpImage);
        }
      }
    }
    int nfaces = trainingFaces.size();
    System.out.println("total: " + nfaces);
    if (nfaces < 2) {
      // Otherwise nEigens would be <= 0 and the allocations below would fail
      // with an opaque NegativeArraySizeException.
      throw new IllegalStateException("need at least 2 training images, got " + nfaces);
    }
    // PCA: find the average image and the eigenfaces that represent any image
    // in the given dataset. Use one fewer eigenvalue than there are faces.
    nTrainFaces = nfaces;
    nEigens = nTrainFaces - 1;
    CvTermCriteria calcLimit;
    CvSize faceImgSize = new CvSize();
    faceImgSize.width(trainingFaces.get(0).width());
    faceImgSize.height(trainingFaces.get(0).height());

    // 1 x nfaces, 32-bit signed, one channel
    personNumTruthMat = cvCreateMat(1, nfaces, CV_32SC1);

    // initialize the person number matrix - for ease of debugging
    for (int j1 = 0; j1 < nfaces; j1++) {
      personNumTruthMat.put(0, j1, 0);
    }

    // allocate one 32-bit float image per eigenvector
    for (int i = 0; i < nEigens; i++) {
      eigens.add(
          cvCreateImage(
              faceImgSize, // size
              IPL_DEPTH_32F, // depth
              1)); // channels
    }

    eigenValMat = cvCreateMat(1, nEigens, CV_32FC1); // type, 32-bit float, 1 channel
    // allocate the averaged image
    pAvgTrainImg =
        cvCreateImage(
            faceImgSize, // size
            IPL_DEPTH_32F, // depth
            1); // channels

    // set the PCA termination criterion
    calcLimit =
        cvTermCriteria(
            CV_TERMCRIT_ITER, // type
            nEigens, // max_iter
            1); // epsilon

    LOGGER.info("computing average image, eigenvalues and eigenvectors");
    // compute average image, eigenvalues, and eigenvectors
    eigenVectArr = eigens.toArray(new IplImage[eigens.size()]);
    cvCalcEigenObjects(
        nTrainFaces, // nObjects
        trainingFaces.toArray(new IplImage[trainingFaces.size()]), // input
        eigenVectArr, // output (array; transfer back to the list later if needed)
        CV_EIGOBJ_NO_CALLBACK, // ioFlags
        0, // ioBufSize
        null, // userData
        calcLimit,
        pAvgTrainImg, // avg
        eigenValMat.data_fl()); // eigVals

    LOGGER.info("normalizing the eigenvectors");
    cvNormalize(
        eigenValMat, // src (CvArr)
        eigenValMat, // dst (CvArr)
        1, // a
        0, // b
        CV_L1, // norm_type
        null); // mask

    LOGGER.info("projecting the training images onto the PCA subspace");
    // project the training images onto the PCA subspace
    projectedTrainFaceMat =
        cvCreateMat(
            nTrainFaces, // rows
            nEigens, // cols
            CV_32FC1); // type, 32-bit float, 1 channel

    // initialize the training face matrix - for ease of debugging
    for (int i1 = 0; i1 < nTrainFaces; i1++) {
      for (int j1 = 0; j1 < nEigens; j1++) {
        projectedTrainFaceMat.put(i1, j1, 0.0);
      }
    }

    LOGGER.info(
        "created projectedTrainFaceMat with "
            + nTrainFaces
            + " (nTrainFaces) rows and "
            + nEigens
            + " (nEigens) columns");

    // decompose each training face into its eigenvector coefficients and store
    // them as one row of projectedTrainFaceMat
    final FloatPointer floatPointer = new FloatPointer(nEigens);
    for (int i = 0; i < nTrainFaces; i++) {
      cvEigenDecomposite(
          trainingFaces.get(i), // obj
          nEigens, // nEigObjs
          eigenVectArr, // eigInput (Pointer)
          0, // ioFlags
          null, // userData (Pointer)
          pAvgTrainImg, // avg
          floatPointer); // coeffs (FloatPointer)

      for (int j1 = 0; j1 < nEigens; j1++) {
        projectedTrainFaceMat.put(i, j1, floatPointer.get(j1));
      }
    }
    // store the recognition data as an xml file
    //    storeTrainingData();

    // Save all the eigenvectors as images, so that they can be checked.
    //  storeEigenfaceImages();

  }