public static void main(String[] args) throws FileNotFoundException {
    DetectCalibrationChessApp app = new DetectCalibrationChessApp();

    // String prefix = "../data/applet/calibration/mono/Sony_DSC-HX5V_Chess/";
    String prefix = "../data/applet/calibration/stereo/Bumblebee2_Chess/";

    app.loadConfigurationFile(prefix + "info.txt");
    app.setBaseDirectory(prefix);
    // app.loadInputData(prefix + "images.txt");

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    for (int i = 1; i <= 12; i++) {
        // inputs.add(new PathLabel(String.format("View %02d", i), String.format("%sframe%02d.jpg", prefix, i)));
        inputs.add(new PathLabel(String.format("View %02d", i), String.format("%sleft%02d.jpg", prefix, i)));
    }

    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Calibration Target Detection", true);
}
public static void main(String[] args) {
    IntensityFeatureScaleSpacePyramidApp<ImageFloat32, ImageFloat32> app =
            new IntensityFeatureScaleSpacePyramidApp<ImageFloat32, ImageFloat32>(
                    ImageFloat32.class, ImageFloat32.class);

    // IntensityFeatureScaleSpacePyramidApp<ImageUInt8, ImageSInt16> app2 =
    //         new IntensityFeatureScaleSpacePyramidApp<ImageUInt8, ImageSInt16>(
    //                 ImageUInt8.class, ImageSInt16.class);

    java.util.List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("shapes", "../data/evaluation/shapes01.png"));
    inputs.add(new PathLabel("sunflowers", "../data/evaluation/sunflowers.png"));
    inputs.add(new PathLabel("beach", "../data/evaluation/scale/beach02.jpg"));

    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Feature Scale Space Pyramid Intensity");
}
/**
 * Detects contours inside the binary image generated by Canny. Only the external contour is
 * relevant. Often easier to deal with than working with Canny edges directly.
 */
public static void fitCannyBinary(ImageFloat32 input) {
    BufferedImage displayImage =
            new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);
    ImageUInt8 binary = new ImageUInt8(input.width, input.height);

    // Finds edges inside the image
    CannyEdge<ImageFloat32, ImageFloat32> canny =
            FactoryEdgeDetectors.canny(2, false, true, ImageFloat32.class, ImageFloat32.class);

    canny.process(input, 0.1f, 0.3f, binary);

    List<Contour> contours = BinaryImageOps.contour(binary, 8, null);

    Graphics2D g2 = displayImage.createGraphics();
    g2.setStroke(new BasicStroke(2));

    // used to select colors for each line
    Random rand = new Random(234);

    for (Contour c : contours) {
        // Only the external contours are relevant.
        List<PointIndex_I32> vertexes =
                ShapeFittingOps.fitPolygon(c.external, true, toleranceDist, toleranceAngle, 100);

        g2.setColor(new Color(rand.nextInt()));
        VisualizeShapes.drawPolygon(vertexes, true, g2);
    }

    ShowImages.showWindow(displayImage, "Canny Contour");
}
/**
 * Fits a sequence of line segments to a sequence of points found using the Canny edge detector.
 * In this case the points are not connected in a loop. The Canny detector produces a more complex
 * tree and the fitted points can be a bit noisy compared to the others.
 */
public static void fitCannyEdges(ImageFloat32 input) {
    BufferedImage displayImage =
            new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);

    // Finds edges inside the image
    CannyEdge<ImageFloat32, ImageFloat32> canny =
            FactoryEdgeDetectors.canny(2, true, true, ImageFloat32.class, ImageFloat32.class);

    canny.process(input, 0.1f, 0.3f, null);
    List<EdgeContour> contours = canny.getContours();

    Graphics2D g2 = displayImage.createGraphics();
    g2.setStroke(new BasicStroke(2));

    // used to select colors for each line
    Random rand = new Random(234);

    for (EdgeContour e : contours) {
        g2.setColor(new Color(rand.nextInt()));

        for (EdgeSegment s : e.segments) {
            // fit line segments to the point sequence. Note that loop is false
            List<PointIndex_I32> vertexes =
                    ShapeFittingOps.fitPolygon(s.points, false, toleranceDist, toleranceAngle, 100);

            VisualizeShapes.drawPolygon(vertexes, false, g2);
        }
    }

    ShowImages.showWindow(displayImage, "Canny Trace");
}
public static void main(String args[]) { BufferedImage input = UtilImageIO.loadImage("../data/evaluation/sunflowers.png"); // BufferedImage input = UtilImageIO.loadImage("../data/evaluation/shapes01.png"); ImageFloat32 gray = ConvertBufferedImage.convertFromSingle(input, null, ImageFloat32.class); SiftDetector alg = FactoryInterestPointAlgs.siftDetector(new ConfigSiftDetector(3, 10, 150, 5)); SiftImageScaleSpace imageSS = new SiftImageScaleSpace(1.6f, 5, 4, false); imageSS.constructPyramid(gray); imageSS.computeFeatureIntensity(); alg.process(imageSS); System.out.println("total features found: " + alg.getFoundPoints().size()); VisualizeFeatures.drawScalePoints( input.createGraphics(), alg.getFoundPoints().toList(), BoofDefaults.SCALE_SPACE_CANONICAL_RADIUS); ListDisplayPanel dog = new ListDisplayPanel(); for (int i = 0; i < alg.ss.dog.length; i++) { int scale = i % (alg.ss.numScales - 1); int octave = i / (alg.ss.numScales - 1); BufferedImage img = VisualizeImageData.colorizeSign(alg.ss.dog[i], null, -1); dog.addImage(img, octave + " " + scale); } ListDisplayPanel ss = new ListDisplayPanel(); for (int i = 0; i < alg.ss.scale.length; i++) { int scale = i % alg.ss.numScales; int octave = i / alg.ss.numScales; BufferedImage img = VisualizeImageData.grayMagnitude(alg.ss.scale[i], null, 255); ss.addImage(img, octave + " " + scale); } ShowImages.showWindow(dog, "Octave DOG"); ShowImages.showWindow(ss, "Octave Scales"); ShowImages.showWindow(input, "Found Features"); System.out.println("Done"); }
public static void main(String args[]) { BufferedImage input = UtilImageIO.loadImage("corn1.png"); detectLines(input, ImageUInt8.class, ImageSInt16.class); // line segment detection is still under development and only works for F32 images right now detectLineSegments(input, ImageFloat32.class, ImageFloat32.class); ShowImages.showWindow(listPanel, "Detected Lines"); }
public static void main(String[] args) {
    // load and convert the image into a usable format
    BufferedImage image = UtilImageIO.loadImage("../data/applet/shapes02.png");
    ImageFloat32 input = ConvertBufferedImage.convertFromSingle(image, null, ImageFloat32.class);

    ShowImages.showWindow(image, "Original");

    fitCannyEdges(input);
    fitCannyBinary(input);
    fitBinaryImage(input);
}
public static void main(String[] args) {
    DescribeImageDense<ImageUInt8, TupleDesc_F64> desc =
            (DescribeImageDense) FactoryDescribeImageDense.surfFast(
                    null, new ConfigDenseSample(DESC_SCALE, DESC_SKIP, DESC_SKIP), ImageUInt8.class);

    ComputeClusters<double[]> clusterer =
            FactoryClustering.kMeans_F64(null, MAX_KNN_ITERATIONS, 20, 1e-6);
    clusterer.setVerbose(true);

    NearestNeighbor<HistogramScene> nn = FactoryNearestNeighbor.exhaustive();
    ExampleClassifySceneKnn example = new ExampleClassifySceneKnn(desc, clusterer, nn);

    File trainingDir = new File(UtilIO.pathExample("learning/scene/train"));
    File testingDir = new File(UtilIO.pathExample("learning/scene/test"));

    if (!trainingDir.exists() || !testingDir.exists()) {
        System.err.println("Please follow instructions in data/applet/learning/scene and download the");
        System.err.println("required files");
        System.exit(1);
    }

    example.loadSets(trainingDir, null, testingDir);
    // train the classifier
    example.learnAndSave();
    // now load it for evaluation purposes from the files
    example.loadAndCreateClassifier();
    // test the classifier on the test set
    Confusion confusion = example.evaluateTest();
    confusion.getMatrix().print();
    System.out.println("Accuracy = " + confusion.computeAccuracy());

    // Show confusion matrix
    // Not the best coloration scheme... perfect = red diagonal and blue elsewhere.
    ShowImages.showWindow(
            new ConfusionMatrixPanel(confusion.getMatrix(), 400, true), "Confusion Matrix", true);

    // For "fast" SURF descriptor the accuracy is 52.2%
    // For "stable" SURF descriptor the accuracy is 49.4%
    //
    // This is interesting. When matching images "stable" is significantly better than "fast".
    // One explanation is that the descriptor for "fast" samples a smaller region than "stable",
    // by a couple of pixels at a scale of 1. Thus there is less overlap between the features.
    // Reducing the size of "stable" to 0.95 does slightly improve performance to 50.5%, but it
    // can't be scaled down much more without performance going down.
}
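// The constants DESC_SCALE, DESC_SKIP, and MAX_KNN_ITERATIONS are referenced above but not
// declared in this snippet. Hypothetical declarations, sketched for completeness; the actual
// values in the original class may differ, and they trade off speed against accuracy.
public static double DESC_SCALE = 1.0;     // assumed scale of the dense SURF descriptor
public static int DESC_SKIP = 8;           // assumed pixel spacing between dense samples
public static int MAX_KNN_ITERATIONS = 10; // assumed cap on k-means iterations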
public static void main(String[] args) {
    Class imageType = ImageFloat32.class;
    Class derivType = GImageDerivativeOps.getDerivativeType(imageType);

    VisualizeAssociationMatchesApp app = new VisualizeAssociationMatchesApp(imageType, derivType);

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("Cave",
            "../data/evaluation/stitch/cave_01.jpg",
            "../data/evaluation/stitch/cave_02.jpg"));
    inputs.add(new PathLabel("Kayak",
            "../data/evaluation/stitch/kayak_02.jpg",
            "../data/evaluation/stitch/kayak_03.jpg"));
    inputs.add(new PathLabel("Forest",
            "../data/evaluation/scale/rainforest_01.jpg",
            "../data/evaluation/scale/rainforest_02.jpg"));
    inputs.add(new PathLabel("Building",
            "../data/evaluation/stitch/apartment_building_01.jpg",
            "../data/evaluation/stitch/apartment_building_02.jpg"));
    inputs.add(new PathLabel("Trees Rotate",
            "../data/evaluation/stitch/trees_rotate_01.jpg",
            "../data/evaluation/stitch/trees_rotate_03.jpg"));

    app.setPreferredSize(new Dimension(1000, 500));
    app.setSize(1000, 500);
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Associated Features");
}
public static void main(String[] args) {
    // VisualizeScaleSpaceApp app = new VisualizeScaleSpaceApp(ImageFloat32.class);
    VisualizeScaleSpaceApp app = new VisualizeScaleSpaceApp(ImageUInt8.class);

    List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("boat", "../data/evaluation/standard/boat.png"));
    inputs.add(new PathLabel("shapes", "../data/evaluation/shapes01.png"));
    inputs.add(new PathLabel("sunflowers", "../data/evaluation/sunflowers.png"));
    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Scale Space");
}
public static void main(String[] args) {
    DemoBinaryImageOpsApp app = new DemoBinaryImageOpsApp(GrayF32.class);

    java.util.List<PathLabel> inputs = new ArrayList<>();
    inputs.add(new PathLabel("particles", UtilIO.pathExample("particles01.jpg")));
    inputs.add(new PathLabel("shapes", UtilIO.pathExample("shapes/shapes01.png")));

    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Binary Image Ops", true);
    System.out.println("Done");
}
public void process(final SimpleImageSequence<T> sequence) {
    if (!sequence.hasNext())
        throw new IllegalArgumentException("Empty sequence");

    image = sequence.next();
    gui.setFrame((BufferedImage) sequence.getGuiImage());

    ShowImages.showWindow(gui, "Circulant Tracker");

    // tracker.initialize(image, 273, 156, 358 - 273, 293 - 156);

    paused = true;
    while (paused) {
        Thread.yield();
    }

    int totalFrames = 0;
    long totalTime = 0;

    while (sequence.hasNext()) {
        totalFrames++;

        image = sequence.next();
        gui.setFrame((BufferedImage) sequence.getGuiImage());

        long before = System.nanoTime();
        tracker.performTracking(image);
        long after = System.nanoTime();

        totalTime += after - before;
        // totalTime is in nanoseconds, so divide by 1e9 to get seconds
        System.out.println("FPS = " + totalFrames / (totalTime / 1e9));

        gui.update(tracker);

        Rectangle2D_F32 r = tracker.getTargetLocation();
        System.out.println("Target: " + r);
        gui.repaint();

        while (paused) {
            Thread.yield();
        }
    }
    System.out.println("DONE");
}
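// process() above relies on fields that are not shown in this snippet. A hypothetical
// reconstruction of the assumed declarations; the actual GUI panel class is not shown
// (any Swing component offering setFrame()/update()/repaint() would fit the usage).
T image;                     // current frame in the tracker's image format
CirculantTracker<T> tracker; // constructed elsewhere before process() is called
volatile boolean paused;     // toggled from the UI thread to single-step the video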
public static void main(String[] args) {
    FourierVisualizeApp app = new FourierVisualizeApp(ImageDataType.F32);
    // FourierVisualizeApp app = new FourierVisualizeApp(ImageDataType.F64);

    java.util.List<PathLabel> inputs = new ArrayList<PathLabel>();
    inputs.add(new PathLabel("lena", "../data/evaluation/standard/lena512.bmp"));
    inputs.add(new PathLabel("boat", "../data/evaluation/standard/boat.png"));
    inputs.add(new PathLabel("fingerprint", "../data/evaluation/standard/fingerprint.png"));
    inputs.add(new PathLabel("shapes", "../data/evaluation/shapes01.png"));
    inputs.add(new PathLabel("sunflowers", "../data/evaluation/sunflowers.png"));

    app.setInputList(inputs);

    // wait for it to process one image so that the size isn't all screwed up
    while (!app.getHasProcessedImage()) {
        Thread.yield();
    }

    ShowImages.showWindow(app, "Discrete Fourier Transform");
}
/** Fits polygons to found contours around binary blobs. */
public static void fitBinaryImage(ImageFloat32 input) {
    ImageUInt8 binary = new ImageUInt8(input.width, input.height);
    BufferedImage polygon =
            new BufferedImage(input.width, input.height, BufferedImage.TYPE_INT_RGB);

    // the mean pixel value is often a reasonable threshold when creating a binary image
    double mean = ImageStatistics.mean(input);

    // create a binary image by thresholding
    ThresholdImageOps.threshold(input, binary, (float) mean, true);

    // reduce noise with some filtering
    ImageUInt8 filtered = BinaryImageOps.erode8(binary, null);
    filtered = BinaryImageOps.dilate8(filtered, null);

    // Find the contour around the shapes
    List<Contour> contours = BinaryImageOps.contour(filtered, 8, null);

    // Fit a polygon to each shape and draw the results
    Graphics2D g2 = polygon.createGraphics();
    g2.setStroke(new BasicStroke(2));

    for (Contour c : contours) {
        // Fit the polygon to the found external contour. Note loop = true
        List<PointIndex_I32> vertexes =
                ShapeFittingOps.fitPolygon(c.external, true, toleranceDist, toleranceAngle, 100);

        g2.setColor(Color.RED);
        VisualizeShapes.drawPolygon(vertexes, true, g2);

        // handle internal contours now
        g2.setColor(Color.BLUE);
        for (List<Point2D_I32> internal : c.internal) {
            vertexes = ShapeFittingOps.fitPolygon(internal, true, toleranceDist, toleranceAngle, 100);
            VisualizeShapes.drawPolygon(vertexes, true, g2);
        }
    }

    ShowImages.showWindow(polygon, "Binary Blob Contours");
}
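// fitCannyBinary(), fitCannyEdges(), and fitBinaryImage() all share the toleranceDist and
// toleranceAngle constants, which are declared elsewhere in the class. Plausible values,
// sketched as an assumption rather than taken from the original source; larger values yield
// coarser polygons with fewer vertexes.
static double toleranceDist = 2;             // max pixel distance from a point to its fitted side
static double toleranceAngle = Math.PI / 10; // min angle change before a new vertex is added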
public static void main(String[] args) {
    String nameIntrinsic = null;
    int cameraId = 0;

    if (args.length >= 1) {
        cameraId = Integer.parseInt(args[0]);
    }
    if (args.length >= 2) {
        nameIntrinsic = args[1];
    } else {
        System.out.println();
        System.out.println("SERIOUSLY YOU NEED TO CALIBRATE THE CAMERA YOURSELF!");
        System.out.println("There will be a lot more jitter and inaccurate pose");
        System.out.println();
    }

    System.out.println();
    System.out.println("camera ID = " + cameraId);
    System.out.println("intrinsic file = " + nameIntrinsic);
    System.out.println();

    Webcam webcam = Webcam.getWebcams().get(cameraId);
    UtilWebcamCapture.adjustResolution(webcam, 640, 480);
    webcam.open();

    Dimension d = webcam.getDevice().getResolution();
    int imageWidth = d.width;
    int imageHeight = d.height;

    ConfigPolygonDetector config = new ConfigPolygonDetector(4);
    config.configRefineLines.sampleRadius = 2;
    config.configRefineLines.maxIterations = 30;

    InputToBinary<ImageFloat32> inputToBinary =
            FactoryThresholdBinary.globalOtsu(0, 255, true, ImageFloat32.class);
    // FactoryThresholdBinary.globalEntropy(0, 255, true, ImageFloat32.class);
    // FactoryThresholdBinary.adaptiveSquare(10, 0, true, ImageFloat32.class);

    // pass in the configured 'config' rather than a fresh default instance
    BinaryPolygonConvexDetector<ImageFloat32> detector =
            FactoryShapeDetector.polygon(inputToBinary, config, ImageFloat32.class);

    ImageFloat32 gray = new ImageFloat32(imageWidth, imageHeight);

    ImagePanel gui = new ImagePanel(imageWidth, imageHeight);
    ShowImages.showWindow(gui, "Fiducials", true);

    while (true) {
        BufferedImage frame = webcam.getImage();

        ConvertBufferedImage.convertFrom(frame, gray);

        detector.process(gray);

        // display the results
        Graphics2D g2 = frame.createGraphics();

        List<Polygon2D_F64> shapes = detector.getFound().toList();

        g2.setStroke(new BasicStroke(4));
        g2.setColor(Color.RED);
        g2.setRenderingHint(RenderingHints.KEY_STROKE_CONTROL, RenderingHints.VALUE_STROKE_PURE);
        g2.setRenderingHint(RenderingHints.KEY_ANTIALIASING, RenderingHints.VALUE_ANTIALIAS_ON);

        Line2D.Double l = new Line2D.Double();

        for (int i = 0; i < shapes.size(); i++) {
            Polygon2D_F64 poly = shapes.get(i);

            for (int j = 0; j < poly.size(); j++) {
                int k = (j + 1) % poly.size();
                l.setLine(poly.get(j).x, poly.get(j).y, poly.get(k).x, poly.get(k).y);
                g2.draw(l);
            }
        }

        gui.setBufferedImageSafe(frame);
        gui.repaint();
    }
}
public void evaluate(String dataName, TldTracker<T, ?> tracker) {
    System.out.println("Processing " + dataName);

    String path = "data/track_rect/TLD/" + dataName;

    Rectangle2D_F64 initial = UtilTldData.parseRectangle(path + "/init.txt");
    Rectangle2D_F64 found = new Rectangle2D_F64();

    TldVisualizationPanel gui = null;

    String imageType = new File(path + "/00001.jpg").exists() ? "jpg" : "png";

    int imageNum = 0;
    while (true) {
        String imageName = String.format("%s/%05d.%s", path, imageNum + 1, imageType);
        BufferedImage image = UtilImageIO.loadImage(imageName);
        if (image == null)
            break;

        input.reshape(image.getWidth(), image.getHeight());
        ConvertBufferedImage.convertFrom(image, input, true);

        boolean detected;

        if (imageNum == 0) {
            gui = new TldVisualizationPanel(this);
            gui.setFrame(image);
            gui.setSelectRectangle(false);
            ShowImages.showWindow(gui, dataName);

            tracker.initialize(input,
                    (int) initial.p0.x, (int) initial.p0.y,
                    (int) initial.p1.x, (int) initial.p1.y);
            detected = true;
        } else {
            detected = tracker.track(input);
            found.set(tracker.getTargetRegion());
        }

        if (!detected) {
            System.out.println("No Detection");
        } else {
            System.out.printf("Detection: %f,%f,%f,%f\n", found.p0.x, found.p0.y, found.p1.x, found.p1.y);

            Graphics2D g2 = image.createGraphics();

            int w = (int) found.getWidth();
            int h = (int) found.getHeight();

            g2.drawRect((int) found.p0.x, (int) found.p0.y, w, h);
        }

        gui.setFrame(image);
        gui.update(tracker, detected);
        gui.repaint();
        imageNum++;

        while (paused) {
            Thread.yield();
        }
        // BoofMiscOps.pause(30);
    }
    System.out.println();
}
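// evaluate() above uses 'input' and 'paused' fields declared elsewhere in the class.
// A hedged sketch of what they are assumed to look like; the exact types in the original
// class may differ.
T input;                 // working image each frame is converted into; reshaped per frame
volatile boolean paused; // presumably toggled through the TldVisualizationPanel to pause playback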
public static void main(String[] args) {
    // Example with a moving camera. Highlights why motion estimation is sometimes required
    String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
    // Camera has a bit of jitter in it. Static kinda works but motion reduces false positives
    // String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

    // Comment/Uncomment to switch input image type
    ImageType imageType = ImageType.single(GrayF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedF32.class);
    // ImageType imageType = ImageType.il(3, InterleavedU8.class);

    // Configure the feature detector
    ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
    confDetector.threshold = 10;
    confDetector.maxFeatures = 300;
    confDetector.radius = 6;

    // Use a KLT tracker
    PointTracker tracker =
            FactoryPointTracker.klt(new int[] {1, 2, 4, 8}, confDetector, 3, GrayF32.class, null);

    // This estimates the 2D image motion
    ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
            FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

    ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

    // Configuration for Gaussian model. Note that the threshold changes depending on the number
    // of image bands: 12 = gray scale and 40 = color
    ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
    configGaussian.initialVariance = 64;
    configGaussian.minimumDifference = 5;

    // Comment/Uncomment to switch background mode
    BackgroundModelMoving background =
            FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
    // FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);

    MediaManager media = DefaultMediaManager.INSTANCE;
    SimpleImageSequence video = media.openVideo(fileName, background.getImageType());
    // media.openCamera(null, 640, 480, background.getImageType());

    // ====== Initialize Images

    // storage for segmented image. Background = 0, Foreground = 1
    GrayU8 segmented = new GrayU8(video.getNextWidth(), video.getNextHeight());
    // Gray scale image that's the input for motion estimation
    GrayF32 grey = new GrayF32(segmented.width, segmented.height);

    // coordinate frames
    Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
    Homography2D_F32 homeToWorld = new Homography2D_F32();
    homeToWorld.a13 = grey.width / 2;
    homeToWorld.a23 = grey.height / 2;

    // Create a background image twice the size of the input image. Tell it that the home is
    // in the center
    background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

    BufferedImage visualized =
            new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
    ImageGridPanel gui = new ImageGridPanel(1, 2);
    gui.setImages(visualized, visualized);

    ShowImages.showWindow(gui, "Detections", true);

    double fps = 0;
    double alpha = 0.01; // smoothing factor for FPS

    while (video.hasNext()) {
        ImageBase input = video.next();

        long before = System.nanoTime();
        GConvertImage.convert(input, grey);

        if (!motion2D.process(grey)) {
            throw new RuntimeException("Should handle this scenario");
        }

        Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
        UtilHomography.convert(firstToCurrent64, firstToCurrent32);

        background.segment(firstToCurrent32, input, segmented);
        background.updateBackground(firstToCurrent32, input);
        long after = System.nanoTime();

        fps = (1.0 - alpha) * fps + alpha * (1.0 / ((after - before) / 1e9));

        VisualizeBinaryData.renderBinary(segmented, false, visualized);
        gui.setImage(0, 0, (BufferedImage) video.getGuiImage());
        gui.setImage(0, 1, visualized);
        gui.repaint();

        System.out.println("FPS = " + fps);

        try {
            Thread.sleep(5);
        } catch (InterruptedException ignore) {
        }
    }
}