/**
 * Detects lines inside the image using different types of Hough detectors
 *
 * @param image Input image.
 * @param imageType Type of image processed by line detector.
 * @param derivType Type of image derivative.
 */
public static <T extends ImageSingleBand, D extends ImageSingleBand> void detectLines(
        BufferedImage image, Class<T> imageType, Class<D> derivType) {
    // convert the input image into a single band image
    T input = ConvertBufferedImage.convertFromSingle(image, null, imageType);

    // Comment/uncomment to try a different type of line detector
    DetectLineHoughPolar<T, D> detector = FactoryDetectLineAlgs.houghPolar(
            new ConfigHoughPolar(3, 30, 2, Math.PI / 180, edgeThreshold, maxLines),
            imageType, derivType);
//  DetectLineHoughFoot<T,D> detector = FactoryDetectLineAlgs.houghFoot(
//          new ConfigHoughFoot(3, 8, 5, edgeThreshold, maxLines), imageType, derivType);
//  DetectLineHoughFootSubimage<T,D> detector = FactoryDetectLineAlgs.houghFootSub(
//          new ConfigHoughFootSubimage(3, 8, 5, edgeThreshold, maxLines, 2, 2), imageType, derivType);

    List<LineParametric2D_F32> found = detector.detect(input);

    // display the results
    ImageLinePanel gui = new ImageLinePanel();
    gui.setBackground(image);
    gui.setLines(found);
    gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

    listPanel.addItem(gui, "Found Lines");
}
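// The six ConfigHoughPolar arguments used in detectLines() are, in order: the local
// maximum suppression radius in transform space, the minimum vote count, the range
// resolution (pixels), the angle resolution (radians), the edge intensity threshold,
// and the maximum number of lines to return. The threshold and line count below are
// illustrative stand-ins for the edgeThreshold and maxLines fields used above.
ConfigHoughPolar exampleConfig = new ConfigHoughPolar(3, 30, 2, Math.PI / 180, 25, 10);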
/**
 * ****************************************************************************************************************************************
 */
private static List<List<PointIndex_I32>> getCandidates(
        BufferedImage image, int blurRadius, float threshLow, float threshHigh,
        double toleranceDist, double toleranceAngle, boolean dynamicThreshold) {
    List<List<PointIndex_I32>> candidates = new ArrayList<List<PointIndex_I32>>();

    ImageFloat32 input = ConvertBufferedImage.convertFromSingle(image, null, ImageFloat32.class);
    ImageUInt8 binary = new ImageUInt8(input.width, input.height);

    // Finds edges inside the image
    CannyEdge<ImageFloat32, ImageFloat32> canny = FactoryEdgeDetectors.canny(
            blurRadius, false, dynamicThreshold, ImageFloat32.class, ImageFloat32.class);
    canny.process(input, threshLow, threshHigh, binary);

    List<Contour> contours = BinaryImageOps.contour(binary, rule, null);

    for (Contour c : contours) {
        // Only the external contours are relevant.
        List<PointIndex_I32> vertices =
                ShapeFittingOps.fitPolygon(c.external, true, toleranceDist, toleranceAngle, 100);
        candidates.add(vertices);
    }
    return candidates;
}
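// A hedged usage sketch for getCandidates(). Every parameter value below is an
// illustrative guess, not a tuned default: blur radius 2, Canny thresholds 0.1/0.3,
// a 2-pixel distance tolerance and roughly 18 degrees of angle tolerance for the
// polygon fit, with dynamic thresholding enabled.
private static void exampleGetCandidates(BufferedImage image) {
    List<List<PointIndex_I32>> candidates =
            getCandidates(image, 2, 0.1f, 0.3f, 2.0, Math.PI / 10, true);
    System.out.println("candidate polygons: " + candidates.size());
}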
public static void main(String args[]) {
    // load and convert the image into a usable format
    BufferedImage image = UtilImageIO.loadImage("../data/applet/shapes02.png");
    ImageFloat32 input = ConvertBufferedImage.convertFromSingle(image, null, ImageFloat32.class);

    ShowImages.showWindow(image, "Original");

    fitCannyEdges(input);
    fitCannyBinary(input);
    fitBinaryImage(input);
}
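// The fitCannyEdges/fitCannyBinary/fitBinaryImage helpers are not shown in this
// excerpt. The sketch below is one plausible shape for fitBinaryImage(): threshold
// around the mean intensity, trace contours, and fit polygons to them. The threshold
// choice, the connectivity argument, and the fit tolerances are all assumptions.
private static void fitBinaryImageSketch(ImageFloat32 input) {
    ImageUInt8 binary = new ImageUInt8(input.width, input.height);
    float mean = (float) ImageStatistics.mean(input);

    // pixels darker than the mean become 1 in the binary image
    ThresholdImageOps.threshold(input, binary, mean, true);

    // 8-connected contour tracing; older BoofCV releases take an int (8) here instead
    List<Contour> contours = BinaryImageOps.contour(binary, ConnectRule.EIGHT, null);

    for (Contour c : contours) {
        List<PointIndex_I32> vertices =
                ShapeFittingOps.fitPolygon(c.external, true, 2.0, Math.PI / 10, 100);
        System.out.println("fit polygon with " + vertices.size() + " vertices");
    }
}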
public void process(final BufferedImage input) {
    setInputImage(input);
    this.input = input;
    workImage = ConvertBufferedImage.convertFromSingle(input, null, imageType);
    scaledIntensity = new ImageFloat32(workImage.width, workImage.height);

    pyramid.setImage(workImage);

    // GUI state must be updated on the Swing event dispatch thread
    SwingUtilities.invokeLater(new Runnable() {
        public void run() {
            setPreferredSize(new Dimension(input.getWidth(), input.getHeight()));
            processedImage = true;
        }
    });

    doRefreshAll();
}
public void process(BufferedImage image) {
    setInputImage(image);
    T gray = ConvertBufferedImage.convertFromSingle(image, null, imageType);

    ss.setImage(gray);

    gui.reset();
    for (int i = 0; i < ss.getTotalScales(); i++) {
        ss.setActiveScale(i);
        double scale = ss.getCurrentScale();
        T scaledImage = ss.getScaledImage();
        BufferedImage b = ConvertBufferedImage.convertTo(scaledImage, null);
        gui.addImage(b, String.format("Scale %6.2f", scale));
    }
    processedImage = true;
}
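// The scale-space field `ss` is constructed elsewhere in this class. The sketch below
// shows one plausible construction for an ImageFloat32 pipeline; the factory call and
// the list of scales are assumptions about this demo, not values taken from it.
private static GaussianScaleSpace<ImageFloat32, ImageFloat32> createScaleSpaceSketch() {
    GaussianScaleSpace<ImageFloat32, ImageFloat32> ss = FactoryGaussianScaleSpace.nocache_F32();
    ss.setScales(1, 1.5, 2, 4, 8, 12, 24);
    return ss;
}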
public static void main(String args[]) {
    BufferedImage input = UtilImageIO.loadImage("../data/evaluation/sunflowers.png");
//  BufferedImage input = UtilImageIO.loadImage("../data/evaluation/shapes01.png");

    ImageFloat32 gray = ConvertBufferedImage.convertFromSingle(input, null, ImageFloat32.class);

    SiftDetector alg = FactoryInterestPointAlgs.siftDetector(new ConfigSiftDetector(3, 10, 150, 5));
    SiftImageScaleSpace imageSS = new SiftImageScaleSpace(1.6f, 5, 4, false);

    imageSS.constructPyramid(gray);
    imageSS.computeFeatureIntensity();
    alg.process(imageSS);

    System.out.println("total features found: " + alg.getFoundPoints().size());

    VisualizeFeatures.drawScalePoints(
            input.createGraphics(),
            alg.getFoundPoints().toList(),
            BoofDefaults.SCALE_SPACE_CANONICAL_RADIUS);

    ListDisplayPanel dog = new ListDisplayPanel();
    for (int i = 0; i < alg.ss.dog.length; i++) {
        int scale = i % (alg.ss.numScales - 1);
        int octave = i / (alg.ss.numScales - 1);
        BufferedImage img = VisualizeImageData.colorizeSign(alg.ss.dog[i], null, -1);
        dog.addImage(img, octave + " " + scale);
    }

    ListDisplayPanel ss = new ListDisplayPanel();
    for (int i = 0; i < alg.ss.scale.length; i++) {
        int scale = i % alg.ss.numScales;
        int octave = i / alg.ss.numScales;
        BufferedImage img = VisualizeImageData.grayMagnitude(alg.ss.scale[i], null, 255);
        ss.addImage(img, octave + " " + scale);
    }

    ShowImages.showWindow(dog, "Octave DOG");
    ShowImages.showWindow(ss, "Octave Scales");
    ShowImages.showWindow(input, "Found Features");
    System.out.println("Done");
}
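// Optional follow-up (a sketch, assuming ScalePoint exposes public x, y, and scale
// fields in this BoofCV release): dump each detected feature to the console.
private static void printFeatures(SiftDetector alg) {
    for (ScalePoint p : alg.getFoundPoints().toList()) {
        System.out.printf("feature at (%.1f, %.1f) scale %.2f%n", p.x, p.y, p.scale);
    }
}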
/**
 * Detects line segments inside the image
 *
 * @param image Input image.
 * @param imageType Type of image processed by line detector.
 * @param derivType Type of image derivative.
 */
public static <T extends ImageSingleBand, D extends ImageSingleBand> void detectLineSegments(
        BufferedImage image, Class<T> imageType, Class<D> derivType) {
    // convert the input image into a single band image
    T input = ConvertBufferedImage.convertFromSingle(image, null, imageType);

    // Comment/uncomment to try a different type of line detector
    DetectLineSegmentsGridRansac<T, D> detector =
            FactoryDetectLineAlgs.lineRansac(40, 30, 2.36, true, imageType, derivType);

    List<LineSegment2D_F32> found = detector.detect(input);

    // display the results
    ImageLinePanel gui = new ImageLinePanel();
    gui.setBackground(image);
    gui.setLineSegments(found);
    gui.setPreferredSize(new Dimension(image.getWidth(), image.getHeight()));

    listPanel.addItem(gui, "Found Line Segments");
}
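// A minimal driver sketch that runs both detectors on the same image. The image path
// reuses one that appears elsewhere in these examples, and ImageFloat32 for both the
// image and derivative types is an illustrative choice, not the only valid one.
public static void exampleRunDetectors() {
    BufferedImage image = UtilImageIO.loadImage("../data/applet/shapes02.png");
    detectLines(image, ImageFloat32.class, ImageFloat32.class);
    detectLineSegments(image, ImageFloat32.class, ImageFloat32.class);
    ShowImages.showWindow(listPanel, "Line Detection");
}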
public static void main(String[] args) throws IOException {
    String usage =
            "usage: crop-objects [OPTION]... FILE [DIR]\n"
            + "Crops detected objects from image FILE and writes their subimages to files.\n"
            + "DIR specifies the directory in which to create the files; by default the "
            + "subimage files are created in the same directory as FILE.\n"
            + "-t [n] [m]   set the low and high threshold values; n and m are values between 0 and 1.";

    if (args.length == 0) {
        System.out.println(usage);
        System.exit(1);
    }

    // interpret options and read arguments
    if (args[0].equals("-t")) {
        threshLow = Float.parseFloat(args[1]);
        threshHigh = Float.parseFloat(args[2]);
        filename = args[3];
        if (args.length == 5) {
            dir = args[4];
        }
    } else {
        filename = args[0];
        if (args.length == 2) {
            dir = args[1];
        }
    }

    // strip the extension from the file name to build output file names
    String name = FilenameUtils.removeExtension(filename);

    // get image
    BufferedImage image = UtilImageIO.loadImage(new File(filename).getAbsolutePath());
    if (image == null) {
        System.out.println(usage);
        System.exit(1);
    }

    // ignore objects smaller than 10% of the image's shorter side
    minsize = (int) (0.1 * Math.min(image.getHeight(), image.getWidth()));

    // find objects in image: generate candidate contours
    ArrayList<List<PointIndex_I32>> objects = new ArrayList<List<PointIndex_I32>>();
    List<BufferedImage> results = new ArrayList<BufferedImage>();
    List<List<PointIndex_I32>> candidates = new ArrayList<List<PointIndex_I32>>();

    ImageFloat32 input = ConvertBufferedImage.convertFromSingle(image, null, ImageFloat32.class);
    BufferedImage bw = ConvertBufferedImage.convertTo(
            input, new BufferedImage(image.getWidth(), image.getHeight(), image.getType()));
    File binaryfile = new File(name + "_" + "binary.png");
    ImageIO.write(bw, "png", binaryfile);

    ImageUInt8 binary = new ImageUInt8(input.width, input.height);

    // Finds edges inside the image
    CannyEdge<ImageFloat32, ImageFloat32> canny = FactoryEdgeDetectors.canny(
            blurRadius, true, dynamicThreshold, ImageFloat32.class, ImageFloat32.class);
    canny.process(input, threshLow, threshHigh, binary);

    List<Contour> contours = BinaryImageOps.contour(binary, rule, null);

    BufferedImage visualBinary = VisualizeBinaryData.renderBinary(binary, null);
    File cannyfile = new File(name + "_" + "canny.png");
    ImageIO.write(visualBinary, "png", cannyfile);

    BufferedImage cannyContour =
            VisualizeBinaryData.renderExternal(contours, null, binary.width, binary.height, null);
    File cannyContourfile = new File(name + "_" + "contour.png");
    ImageIO.write(cannyContour, "png", cannyContourfile);

    for (Contour c : contours) {
        // Only the external contours are relevant.
        List<PointIndex_I32> vertices =
                ShapeFittingOps.fitPolygon(c.external, true, toleranceDist, toleranceAngle, 100);
        candidates.add(vertices);
    }

    for (List<PointIndex_I32> vertices : candidates) {
        try {
            Candidate c = new Candidate(vertices, image);
            if (c.size(minsize)) {
                c.rotate();
                results.add(c.getImage());
                objects.add(vertices);
            }
        } catch (Exception e) {
            System.out.println("Error creating candidate from contour: " + e.getMessage());
        }
    }

    // write subimages of objects to files
    int i = 0;
    for (BufferedImage obj : results) {
        try {
            File outputfile = new File(name + "_" + i + ".png");
            i++;
            ImageIO.write(obj, "png", outputfile);
        } catch (IOException e) {
            System.out.println("Error writing subimages: " + e.getMessage());
        }
    }

    // draw objects onto original image and save
    Draw.drawPolygons(objects, image);
    File outputfile = new File(name + "_" + "annotated.png");
    ImageIO.write(image, "png", outputfile);
}
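// The Candidate helper used above is not shown in this excerpt. As a rough
// illustration of the cropping step (ignoring the rotation it also performs), a
// subimage can be cut from the axis-aligned bounding box of the fitted polygon:
private static BufferedImage cropBoundingBox(List<PointIndex_I32> vertices, BufferedImage image) {
    int x0 = Integer.MAX_VALUE, y0 = Integer.MAX_VALUE;
    int x1 = Integer.MIN_VALUE, y1 = Integer.MIN_VALUE;
    for (PointIndex_I32 p : vertices) {
        x0 = Math.min(x0, p.x);
        y0 = Math.min(y0, p.y);
        x1 = Math.max(x1, p.x);
        y1 = Math.max(y1, p.y);
    }
    return image.getSubimage(x0, y0, x1 - x0 + 1, y1 - y0 + 1);
}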