/*
 * Calculates the square root of the second-moment matrix at p, storing it in Mk,
 * and returns the isotropy measure q = sqrt(lambda_min) / sqrt(lambda_max).
 */
float calcSecondMomentSqrt(AbstractStructureTensorIPD ipd, Pixel p, Matrix Mk)
{
    Matrix M, V, eigVal, Vinv;

    M = calcSecondMomentMatrix(ipd, p.x, p.y);

    /*
     * M = V * D * V.inv()
     * V has eigenvectors as columns
     * D is a diagonal Matrix with eigenvalues as elements
     * V.inv() is the inverse of V
     */
    // EigenvalueDecomposition meig = M.eig();
    EigenValueVectorPair meig = MatrixUtils.symmetricEig2x2(M);
    eigVal = meig.getValues();
    V = meig.getVectors();

    // V = V.transpose();
    Vinv = V.inverse();

    // Take the square root of each eigenvalue
    double eval1 = Math.sqrt(eigVal.get(0, 0));
    eigVal.set(0, 0, eval1);
    double eval2 = Math.sqrt(eigVal.get(1, 1));
    eigVal.set(1, 1, eval2);

    // Square root of M: V * sqrt(D) * V.inv()
    Mk.setMatrix(0, 1, 0, 1, V.times(eigVal).times(Vinv));

    // Return the isotropy measure q
    return (float) (Math.min(eval1, eval2) / Math.max(eval1, eval2));
}
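/*
 * Worked sketch (hypothetical helper, not part of the original class): the square root
 * above relies on the identity M^(1/2) = V * D^(1/2) * V^(-1) for the eigen-decomposition
 * M = V * D * V^(-1). The method below reproduces that computation using only Jama's
 * general EigenvalueDecomposition instead of MatrixUtils.symmetricEig2x2, assuming Matrix
 * is Jama's Matrix (as in the surrounding code) and that M is the symmetric, positive
 * semi-definite 2x2 second-moment matrix. It is an illustration, not a drop-in replacement.
 */
static Matrix sqrtSymmetric2x2Sketch(Matrix M) {
    final Jama.EigenvalueDecomposition eig = M.eig(); // M = V * D * V^-1
    final Matrix V = eig.getV();                      // eigenvectors as columns
    final Matrix D = eig.getD().copy();               // diagonal eigenvalue matrix

    D.set(0, 0, Math.sqrt(D.get(0, 0)));              // square root of each eigenvalue
    D.set(1, 1, Math.sqrt(D.get(1, 1)));

    return V.times(D).times(V.inverse());             // V * sqrt(D) * V^-1
}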
/*
 * Selects the differentiation (local) scale: for s in [0.5, 0.75] (step 0.05) the
 * candidate scale sd = s * si is evaluated, and the detector state that maximises the
 * eigenvalue ratio q of the second-moment matrix at c is returned.
 */
AbstractStructureTensorIPD selDifferentiationScale(FImage img,
        AbstractStructureTensorIPD ipdToUse, float si, Pixel c)
{
    AbstractStructureTensorIPD best = null;
    float s = 0.5f;
    float sigma_prev = 0, sigma;
    FImage L;

    double qMax = 0;

    L = img.clone();

    AbstractStructureTensorIPD ipd = ipdToUse.clone();

    while (s <= 0.751) {
        Matrix M;
        float sd = s * si;

        // Smooth the previously smoothed image L up to the candidate scale sd
        sigma = (float) Math.sqrt(Math.pow(sd, 2) - Math.pow(sigma_prev, 2));

        L.processInplace(new FGaussianConvolve(sigma, 3));

        sigma_prev = sd;

        // X and Y derivatives
        ipd.setDetectionScale(sd);
        ipd.setIntegrationScale(si);
        ipd.setImageBlurred(true);

        ipd.findInterestPoints(L);

        // FImage Lx = L.process(new FConvolution(DX_KERNEL.multiply(sd)));
        // FImage Ly = L.process(new FConvolution(DY_KERNEL.multiply(sd)));
        //
        // FGaussianConvolve gauss = new FGaussianConvolve(si, 3);
        //
        // FImage Lxm2 = Lx.multiply(Lx);
        // dx2 = Lxm2.process(gauss);
        //
        // FImage Lym2 = Ly.multiply(Ly);
        // dy2 = Lym2.process(gauss);
        //
        // FImage Lxmy = Lx.multiply(Ly);
        // dxy = Lxmy.process(gauss);

        M = calcSecondMomentMatrix(ipd, c.x, c.y);

        // Calculate the eigenvalue ratio q
        // EigenvalueDecomposition meig = M.eig();
        EigenValueVectorPair meig = MatrixUtils.symmetricEig2x2(M);
        Matrix eval = meig.getValues();
        double eval1 = Math.abs(eval.get(0, 0));
        double eval2 = Math.abs(eval.get(1, 1));
        double q = Math.min(eval1, eval2) / Math.max(eval1, eval2);

        // Keep the detector state with the most isotropic second-moment matrix
        if (q >= qMax) {
            qMax = q;
            best = ipd.clone();
        }

        s += 0.05;
    }

    return best;
}
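/*
 * Worked sketch (hypothetical helper, not part of the original class): the loop above
 * re-smooths the already-blurred image L instead of the original, using the fact that
 * Gaussian blurs compose as sigma_total^2 = sigma_a^2 + sigma_b^2. Blurring L (already at
 * sigma_prev) with sqrt(sd^2 - sigma_prev^2) therefore approximates blurring the original
 * image directly with sd. FImage and FGaussianConvolve are the OpenIMAJ types already used
 * above; the truncation factor of 3 matches the call in selDifferentiationScale.
 */
static FImage blurIncrementallySketch(FImage alreadySmoothed, float sigmaPrev, float sigmaTarget) {
    // Only valid when sigmaTarget >= sigmaPrev; a Gaussian blur cannot be "un-applied".
    final float sigmaInc = (float) Math.sqrt(sigmaTarget * sigmaTarget - sigmaPrev * sigmaPrev);
    return alreadySmoothed.process(new FGaussianConvolve(sigmaInc, 3));
}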
/*
 * Performs affine adaptation of the interest point kpt: iteratively updates the shape
 * matrix U until the second-moment matrix of the normalized patch is nearly isotropic.
 * Returns true if the adaptation converged, false on divergence or when the iteration
 * limit is reached.
 */
boolean calcAffineAdaptation(final FImage fimage, EllipticInterestPointData kpt,
        AbstractStructureTensorIPD ipd)
{
    // DisplayUtilities.createNamedWindow("warp", "Warped Image ROI", true);
    Matrix transf = new Matrix(2, 3);   // Transformation matrix
    Point2dImpl c = new Point2dImpl();  // Transformed point
    Point2dImpl p = new Point2dImpl();  // Image point

    Matrix U = Matrix.identity(2, 2);   // Normalization matrix

    Matrix Mk = U.copy();
    FImage img_roi, warpedImg = new FImage(1, 1);
    float Qinv = 1, q, si = kpt.scale;  // sd = 0.75f * si;
    float kptSize = 2 * 3 * kpt.scale;
    boolean divergence = false, convergence = false;
    int i = 0;

    // Coordinates in image
    int py = (int) kpt.y;
    int px = (int) kpt.x;

    // ROI coordinates
    int roix, roiy;

    // Coordinates in U-transformation
    int cx = px;
    int cy = py;
    int cxPr = cx;
    int cyPr = cy;

    float radius = kptSize / 2 * 1.4f;
    float half_width, half_height;

    Rectangle roi;

    // Affine adaptation loop
    while (i <= 10 && !divergence && !convergence) {
        // Transformation matrix
        MatrixUtils.zero(transf);
        transf.setMatrix(0, 1, 0, 1, U);
        kpt.setTransform(U);

        Rectangle boundingBox = new Rectangle();

        double ac_b2 = U.det();
        boundingBox.width = (float) Math.ceil(U.get(1, 1) / ac_b2 * 3 * si * 1.4);
        boundingBox.height = (float) Math.ceil(U.get(0, 0) / ac_b2 * 3 * si * 1.4);

        // Create window around interest point
        half_width = Math.min((float) Math.min(fimage.width - px - 1, px), boundingBox.width);
        half_height = Math.min((float) Math.min(fimage.height - py - 1, py), boundingBox.height);

        if (half_width <= 0 || half_height <= 0)
            return divergence;

        roix = Math.max(px - (int) boundingBox.width, 0);
        roiy = Math.max(py - (int) boundingBox.height, 0);
        roi = new Rectangle(roix, roiy, px - roix + half_width + 1, py - roiy + half_height + 1);

        // Create ROI
        img_roi = fimage.extractROI(roi);

        // Point within the ROI
        p.x = px - roix;
        p.y = py - roiy;

        // Find coordinates of the square's corners to get the size of the warped ellipse's bounding box
        float u00 = (float) U.get(0, 0);
        float u01 = (float) U.get(0, 1);
        float u10 = (float) U.get(1, 0);
        float u11 = (float) U.get(1, 1);

        float minx = u01 * img_roi.height < 0 ? u01 * img_roi.height : 0;
        float miny = u10 * img_roi.width < 0 ? u10 * img_roi.width : 0;

        float maxx = (u00 * img_roi.width > u00 * img_roi.width + u01 * img_roi.height
                ? u00 * img_roi.width
                : u00 * img_roi.width + u01 * img_roi.height) - minx;
        float maxy = (u11 * img_roi.height > u10 * img_roi.width + u11 * img_roi.height
                ? u11 * img_roi.height
                : u10 * img_roi.width + u11 * img_roi.height) - miny;

        // Shift
        transf.set(0, 2, -minx);
        transf.set(1, 2, -miny);

        if (maxx >= 2 * radius + 1 && maxy >= 2 * radius + 1) {
            // Size of normalized window must be 2*radius

            // Transformation
            FImage warpedImgRoi;
            FProjectionProcessor proc = new FProjectionProcessor();
            proc.setMatrix(transf);
            img_roi.accumulateWith(proc);
            warpedImgRoi = proc.performProjection(0, (int) maxx, 0, (int) maxy, null);

            // DisplayUtilities.displayName(warpedImgRoi.clone().normalise(), "warp");

            // Point in U-normalized coordinates
            c = p.transform(U);
            cx = (int) (c.x - minx);
            cy = (int) (c.y - miny);

            if (warpedImgRoi.height > 2 * radius + 1 && warpedImgRoi.width > 2 * radius + 1) {
                // Cut around normalized patch
                roix = (int) Math.max(cx - Math.ceil(radius), 0.0);
                roiy = (int) Math.max(cy - Math.ceil(radius), 0.0);
                roi = new Rectangle(roix, roiy,
                        cx - roix + (float) Math.min(Math.ceil(radius), warpedImgRoi.width - cx - 1) + 1,
                        cy - roiy + (float) Math.min(Math.ceil(radius), warpedImgRoi.height - cy - 1) + 1);
                warpedImg = warpedImgRoi.extractROI(roi);

                // Coordinates in the cut ROI
                cx = cx - roix;
                cy = cy - roiy;
            } else {
                warpedImg.internalAssign(warpedImgRoi);
            }

            if (logger.getLevel() == Level.DEBUG) {
                displayCurrentPatch(img_roi.clone().normalise(), p.x, p.y,
                        warpedImg.clone().normalise(), cx, cy, U, si * 3);
            }

            // Integration scale selection
            si = selIntegrationScale(warpedImg, si, new Pixel(cx, cy));

            // Differentiation scale selection
            if (fastDifferentiationScale) {
                ipd = selDifferentiationScaleFast(warpedImg, ipd, si, new Pixel(cx, cy));
            } else {
                ipd = selDifferentiationScale(warpedImg, ipd, si, new Pixel(cx, cy));
            }

            if (ipd.maxima.size() == 0) {
                divergence = true;
                continue;
            }

            // Spatial localization
            cxPr = cx; // Previous iteration point in normalized window
            cyPr = cy;

            // float cornMax = 0;
            // for (int j = 0; j < 3; j++) {
            //     for (int t = 0; t < 3; t++) {
            //         float dx2 = Lxm2smooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
            //         float dy2 = Lym2smooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
            //         float dxy = Lxmysmooth.pixels[cyPr - 1 + j][cxPr - 1 + t];
            //         float det = dx2 * dy2 - dxy * dxy;
            //         float tr = dx2 + dy2;
            //         float cornerness = (float) (det - (0.04 * Math.pow(tr, 2)));
            //
            //         if (cornerness > cornMax) {
            //             cornMax = cornerness;
            //             cx = cxPr - 1 + t;
            //             cy = cyPr - 1 + j;
            //         }
            //     }
            // }

            FValuePixel max = ipd.findMaximum(new Rectangle(cxPr - 1, cyPr - 1, 3, 3));
            cx = max.x;
            cy = max.y;

            // Transform point in image coordinates
            p.x = px;
            p.y = py;

            // Displacement vector
            c.x = cx - cxPr;
            c.y = cy - cyPr;

            // New interest point location in image
            p.translate(c.transform(U.inverse()));
            px = (int) p.x;
            py = (int) p.y;

            q = calcSecondMomentSqrt(ipd, new Pixel(cx, cy), Mk);

            float ratio = 1 - q;

            // If ratio == 1 then q == 0 and one axis equals 0
            if (!Float.isNaN(ratio) && ratio != 1) {
                // Update U matrix
                U = U.times(Mk);

                Matrix uVal, uV;
                // EigenvalueDecomposition ueig = U.eig();
                EigenValueVectorPair ueig = MatrixUtils.symmetricEig2x2(U);
                uVal = ueig.getValues();
                uV = ueig.getVectors();

                Qinv = normMaxEval(U, uVal, uV);

                // Keypoint doesn't converge
                if (Qinv >= 6) {
                    logger.debug("QInverse too large, feature too edge like, affine divergence!");
                    divergence = true;
                } else if (ratio <= 0.05) {
                    // Keypoint converges
                    convergence = true;

                    // Set transformation matrix
                    MatrixUtils.zero(transf);
                    transf.setMatrix(0, 1, 0, 1, U);

                    // The order here matters: setTransform uses x and y to calculate a new ellipse
                    kpt.x = px;
                    kpt.y = py;
                    kpt.scale = si;
                    kpt.setTransform(U);
                    kpt.score = max.value;

                    // ax1 = (float) (1 / Math.abs(uVal.get(1, 1)) * 3 * si);
                    // ax2 = (float) (1 / Math.abs(uVal.get(0, 0)) * 3 * si);
                    // phi = Math.atan(uV.get(1, 1) / uV.get(0, 1));
                    // kpt.axes = new Point2dImpl(ax1, ax2);
                    // kpt.phi = phi;
                    // kpt.centre = new Pixel(px, py);
                    // kpt.si = si;
                    // kpt.size = 2 * 3 * si;
                } else {
                    radius = (float) (3 * si * 1.4);
                }
            } else {
                logger.debug("QRatio was close to 0, affine divergence!");
                divergence = true;
            }
        } else {
            logger.debug("Window size has grown too fast, scale divergence!");
            divergence = true;
        }

        ++i;
    }

    if (!divergence && !convergence) {
        logger.debug("Reached max iterations!");
    }

    return convergence;
}
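/*
 * Summary sketch (hypothetical helper, shown only to make the loop's outcomes explicit):
 * q is the isotropy measure returned by calcSecondMomentSqrt and qInv is the value
 * returned by normMaxEval. The loop above converges when the normalized patch is nearly
 * isotropic (1 - q <= 0.05), diverges when the adapted shape is too elongated / edge-like
 * (qInv >= 6), and otherwise iterates again with the updated U, up to the 10-iteration
 * cap. (A NaN ratio or ratio == 1 is also treated as divergence above.)
 */
enum AdaptationState { CONVERGED, DIVERGED, CONTINUE }

static AdaptationState adaptationStateSketch(float q, float qInv) {
    if (qInv >= 6)
        return AdaptationState.DIVERGED;   // feature too edge-like
    if (1 - q <= 0.05f)
        return AdaptationState.CONVERGED;  // second-moment matrix nearly isotropic
    return AdaptationState.CONTINUE;       // keep iterating with the updated U
}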