Code Example #1
  /**
   * Process all the data in the training data set to learn the classifications. See code for
   * details.
   */
  public void learnAndSave() {
    System.out.println("======== Learning Classifier");

    // Either load pre-computed words or compute the words from the training images
    AssignCluster<double[]> assignment;
    if (new File(CLUSTER_FILE_NAME).exists()) {
      assignment = UtilIO.load(CLUSTER_FILE_NAME);
    } else {
      System.out.println(" Computing clusters");
      assignment = computeClusters();
    }

    // Use these clusters to assign features to words
    FeatureToWordHistogram_F64 featuresToHistogram =
        new FeatureToWordHistogram_F64(assignment, HISTOGRAM_HARD);

    // Storage for the word histogram of each image in the training set and its label
    List<HistogramScene> memory;

    if (!new File(HISTOGRAM_FILE_NAME).exists()) {
      System.out.println(" computing histograms");
      memory = computeHistograms(featuresToHistogram);
      UtilIO.save(memory, HISTOGRAM_FILE_NAME);
    }
  }
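
The call to computeHistograms() above is not reproduced in this section. As a rough sketch of what it likely does, assuming FeatureToWordHistogram_F64 exposes reset()/addFeature()/process()/getHistogram() and that HistogramScene pairs a histogram with a scene label (both assumptions based on how these classes are used in the surrounding examples):

  // Hedged sketch of computeHistograms(); the FeatureToWordHistogram_F64 and
  // HistogramScene calls below are assumptions, not code from this example.
  private List<HistogramScene> computeHistograms(FeatureToWordHistogram_F64 featuresToHistogram) {
    List<HistogramScene> memory = new ArrayList<>();
    int sceneIndex = 0;
    for (String scene : train.keySet()) {
      for (String path : train.get(scene)) {
        ImageUInt8 image = UtilImageIO.loadImage(path, ImageUInt8.class);
        features.reset();
        describeImage.process(image, features, null);

        // Convert this image's dense descriptors into a normalized word histogram
        featuresToHistogram.reset();
        for (int i = 0; i < features.size; i++) {
          featuresToHistogram.addFeature(features.get(i));
        }
        featuresToHistogram.process();

        // Pair the histogram with its scene label
        HistogramScene histogram = new HistogramScene(NUMBER_OF_WORDS);
        histogram.setHistogram(featuresToHistogram.getHistogram());
        histogram.type = sceneIndex;
        memory.add(histogram);
      }
      sceneIndex++;
    }
    return memory;
  }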
Code Example #2
  public static void main(String[] args) {

    DescribeImageDense<ImageUInt8, TupleDesc_F64> desc =
        (DescribeImageDense)
            FactoryDescribeImageDense.surfFast(
                null, new ConfigDenseSample(DESC_SCALE, DESC_SKIP, DESC_SKIP), ImageUInt8.class);

    ComputeClusters<double[]> clusterer =
        FactoryClustering.kMeans_F64(null, MAX_KNN_ITERATIONS, 20, 1e-6);
    clusterer.setVerbose(true);

    NearestNeighbor<HistogramScene> nn = FactoryNearestNeighbor.exhaustive();
    ExampleClassifySceneKnn example = new ExampleClassifySceneKnn(desc, clusterer, nn);

    File trainingDir = new File(UtilIO.pathExample("learning/scene/train"));
    File testingDir = new File(UtilIO.pathExample("learning/scene/test"));

    if (!trainingDir.exists() || !testingDir.exists()) {
      System.err.println(
          "Please follow instructions in data/applet/learning/scene and download the");
      System.err.println("required files");
      System.exit(1);
    }

    example.loadSets(trainingDir, null, testingDir);
    // train the classifier
    example.learnAndSave();
    // now load it for evaluation purposes from the files
    example.loadAndCreateClassifier();

    // test the classifier on the test set
    Confusion confusion = example.evaluateTest();
    confusion.getMatrix().print();
    System.out.println("Accuracy = " + confusion.computeAccuracy());

    // Show confusion matrix
    // Not the best coloration scheme...  perfect = red diagonal and blue elsewhere.
    ShowImages.showWindow(
        new ConfusionMatrixPanel(confusion.getMatrix(), 400, true), "Confusion Matrix", true);

    // For  "fast"  SURF descriptor the accuracy is 52.2%
    // For "stable" SURF descriptor the accuracy is 49.4%

    // This is interesting. When matching images, "stable" is significantly better than "fast".
    // One explanation is that the descriptor for "fast" samples a smaller region than "stable",
    // by a couple of pixels at a scale of 1, so there is less overlap between the features.

    // Reducing the size of "stable" to 0.95 does slightly improve performance to 50.5%, but it
    // can't be scaled down much more before performance starts to drop.
  }
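
To reproduce the "stable" numbers quoted in the comments above, the dense descriptor can be swapped at construction time. A minimal sketch, assuming FactoryDescribeImageDense.surfStable() accepts the same arguments as surfFast():

    // Hypothetical swap to the "stable" SURF descriptor; the surfStable() signature
    // is assumed to mirror surfFast() above.
    DescribeImageDense<ImageUInt8, TupleDesc_F64> desc =
        (DescribeImageDense)
            FactoryDescribeImageDense.surfStable(
                null, new ConfigDenseSample(DESC_SCALE, DESC_SKIP, DESC_SKIP), ImageUInt8.class);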
Code Example #3
  /**
   * Extract dense features across the training set. Then clusters are found within those features.
   */
  private AssignCluster<double[]> computeClusters() {
    System.out.println("Image Features");

    // computes features in the training image set
    features.reset();
    for (String scene : train.keySet()) {
      List<String> imagePaths = train.get(scene);
      System.out.println("   " + scene);

      for (String path : imagePaths) {
        ImageUInt8 image = UtilImageIO.loadImage(path, ImageUInt8.class);
        describeImage.process(image, features, null);
      }
    }
    // Add the features to the overall list within which the clusters will be found
    for (int i = 0; i < features.size; i++) {
      cluster.addReference(features.get(i).getValue());
    }

    System.out.println("Clustering");
    // Find the clusters.  This can take a bit
    cluster.process(NUMBER_OF_WORDS);

    UtilIO.save(cluster.getAssignment(), CLUSTER_FILE_NAME);

    return cluster.getAssignment();
  }
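
The AssignCluster returned here is what maps a raw descriptor onto a visual word. A minimal sketch of the two assignment modes, assuming the DDogleg AssignCluster interface with assign(point) for hard assignment and assign(point, fit) for soft assignment:

    // Hard assignment: the descriptor maps to exactly one word (HISTOGRAM_HARD = true)
    double[] descriptor = new double[64]; // stand-in for one 64-element SURF descriptor
    int word = assignment.assign(descriptor);

    // Soft assignment: membership is spread across all words
    double[] fit = new double[assignment.getNumberOfClusters()];
    assignment.assign(descriptor, fit);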
Code Example #4
  public void loadAndCreateClassifier() {
    // load results from a file
    List<HistogramScene> memory = UtilIO.load(HISTOGRAM_FILE_NAME);
    AssignCluster<double[]> assignment = UtilIO.load(CLUSTER_FILE_NAME);

    FeatureToWordHistogram_F64 featuresToHistogram =
        new FeatureToWordHistogram_F64(assignment, HISTOGRAM_HARD);

    // Provide the training results to K-NN and it will preprocess them for quick lookup later on.
    // This classifier can be used with the saved results, avoiding the learning phase.

    classifier =
        new ClassifierKNearestNeighborsBow<ImageUInt8, TupleDesc_F64>(
            nn, describeImage, featuresToHistogram);
    classifier.setClassificationData(memory, getScenes().size());
    classifier.setNumNeighbors(NUM_NEIGHBORS);
  }
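
Once configured, the classifier can label individual images. A hedged usage sketch, assuming ClassifierKNearestNeighborsBow exposes a classify(image) method returning an index into getScenes():

    // Hypothetical usage; classify(image) returning a scene index is an assumption
    ImageUInt8 image = UtilImageIO.loadImage("path/to/image.jpg", ImageUInt8.class);
    int sceneIndex = classifier.classify(image);
    System.out.println("Scene = " + getScenes().get(sceneIndex));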
Code Example #5
  public static void main(String[] args) {
    DemoBinaryImageOpsApp app = new DemoBinaryImageOpsApp(GrayF32.class);

    java.util.List<PathLabel> inputs = new ArrayList<>();
    inputs.add(new PathLabel("particles", UtilIO.pathExample("particles01.jpg")));
    inputs.add(new PathLabel("shapes", UtilIO.pathExample("shapes/shapes01.png")));

    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
      Thread.yield();
    }

    ShowImages.showWindow(app, "Binary Image Ops", true);

    System.out.println("Done");
  }
Code Example #6
  public static void main(String[] args) {

    // Example with a moving camera.  Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it.  Static kinda works but motion reduces false positives
    //		String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    //		ImageType imageType = ImageType.il(3, InterleavedF32.class);
    //		ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker =
        FactoryPointTracker.klt(new int[] {1, 2, 4, 8}, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
        FactoryMotion2D.createMotion2D(
            500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for the Gaussian model. Note that the threshold changes depending on the
    // number of image bands: 12 for gray scale and 40 for color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background =
        FactoryBackgroundModel.movingBasic(
            configBasic, new PointTransformHomography_F32(), imageType);
    //		FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    //				media.openCamera(null,640,480,background.getImageType());

    // ====== Initialize Images

    // storage for segmented image.  Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Grey scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image.  Tell it that the home is in the
    // center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized =
        new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);

    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    double alpha = 0.01; // smoothing factor for FPS

    while (video.hasNext()) {
      ImageBase input = video.next();

      long before = System.nanoTime();
      GConvertImage.convert(input, grey);

      if (!motion2D.process(grey)) {
        throw new RuntimeException("Should handle this scenario");
      }

      Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
      UtilHomography.convert(firstToCurrent64, firstToCurrent32);

      background.segment(firstToCurrent32, input, segmented);
      background.updateBackground(firstToCurrent32, input);
      long after = System.nanoTime();

      fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

      VisualizeBinaryData.renderBinary(segmented, false, visualized);
      gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
      gui.setImage(0, 1, visualized);
      gui.repaint();

      System.out.println("FPS = " + fps);

      try {
        Thread.sleep(5);
      } catch (InterruptedException e) {
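        // Ignore the interruption; the loop simply moves on to the next frame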
      }
    }
  }
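
The frame rate printed in the loop is smoothed with an exponential moving average: fps = (1 - alpha) * fps + alpha * (1 / dt). With alpha = 0.01 the estimate effectively averages over roughly the last 1 / alpha = 100 frames. The same update, isolated as a small helper:

  // Exponential moving average of the frame rate, matching the update in the loop above
  static double updateFps(double fps, double alpha, long beforeNanos, long afterNanos) {
    double instantFps = 1.0 / ((afterNanos - beforeNanos) / 1e9);
    return (1.0 - alpha) * fps + alpha * instantFps;
  }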