Example #1
0
  // javadoc: AlignMTB::process(src, dst, times, response)
  public void process(List<Mat> src, List<Mat> dst, Mat times, Mat response) {
    // Pack the Java lists into single Mats before crossing the JNI boundary.
    Mat packedSrc = Converters.vector_Mat_to_Mat(src);
    Mat packedDst = Converters.vector_Mat_to_Mat(dst);
    process_0(
        nativeObj, packedSrc.nativeObj, packedDst.nativeObj, times.nativeObj, response.nativeObj);
  }
Example #2
0
  // javadoc: AlignMTB::process(src, dst)
  public void process(List<Mat> src, List<Mat> dst) {
    // Pack the Java lists into single Mats before crossing the JNI boundary.
    Mat packedSrc = Converters.vector_Mat_to_Mat(src);
    Mat packedDst = Converters.vector_Mat_to_Mat(dst);
    process_1(nativeObj, packedSrc.nativeObj, packedDst.nativeObj);
  }
 /**
  * Detects keypoints in an image set.
  *
  * @param images Image set.
  * @param keypoints The detected keypoints. <code>keypoints[i]</code> is the set of keypoints
  *     detected in <code>images[i]</code>.
  * @see <a
  *     href="http://docs.opencv.org/modules/features2d/doc/common_interfaces_of_feature_detectors.html#featuredetector-detect">org.opencv.features2d.FeatureDetector.detect</a>
  */
 public void detect(List<Mat> images, List<MatOfKeyPoint> keypoints) {
   Mat images_mat = Converters.vector_Mat_to_Mat(images);
   Mat keypoints_mat = new Mat();
   detect_3(nativeObj, images_mat.nativeObj, keypoints_mat.nativeObj);
   Converters.Mat_to_vector_vector_KeyPoint(keypoints_mat, keypoints);
   // Release the temporary packed Mat immediately (as buildOpticalFlowPyramid and
   // getVoronoiFacetList do) instead of leaving native memory to the finalizer.
   keypoints_mat.release();
   return;
 }
  /**
   * Warps the quadrilateral given by the four (pN_x, pN_y) corner points of {@code image1} onto a
   * hard-coded destination quadrilateral and returns the result as a new bitmap of the same size
   * as {@code image1}.
   *
   * <p>NOTE(review): {@code image2} is never used by this method — the parameter is kept only for
   * signature compatibility with existing callers.
   *
   * @param image1 source bitmap
   * @param image2 unused
   * @return the perspective-warped bitmap (RGB_565, same dimensions as {@code image1})
   */
  private static Bitmap imgtrafo(
      Bitmap image1,
      Bitmap image2,
      int p1_x,
      int p1_y,
      int p2_x,
      int p2_y,
      int p3_x,
      int p3_y,
      int p4_x,
      int p4_y) {
    // Output keeps the input's dimensions.
    int resultWidth = image1.getWidth();
    int resultHeight = image1.getHeight();

    // Utils.bitmapToMat and Imgproc.warpPerspective allocate their destination Mats themselves,
    // so no presizing is needed. (The original presized with new Mat(width, height, CV_32FC1),
    // which swaps the Mat(rows, cols, type) constructor arguments and uses a float type for
    // bitmap data.)
    Mat inputMat = new Mat();
    Utils.bitmapToMat(image1, inputMat);
    Mat outputMat = new Mat();

    // Source quad: the four caller-supplied corner points.
    List<Point> source = new ArrayList<Point>();
    source.add(new Point(p1_x, p1_y));
    source.add(new Point(p2_x, p2_y));
    source.add(new Point(p3_x, p3_y));
    source.add(new Point(p4_x, p4_y));
    Mat inputQuad = Converters.vector_Point2f_to_Mat(source);

    // Destination quad: manually chosen fixed points (as in the original).
    List<Point> dest = new ArrayList<Point>();
    dest.add(new Point(256, 40)); // manually set
    dest.add(new Point(522, 62));
    dest.add(new Point(455, 479));
    dest.add(new Point(134, 404));
    Mat outputQuad = Converters.vector_Point2f_to_Mat(dest);

    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(inputQuad, outputQuad);

    Imgproc.warpPerspective(
        inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight));

    Bitmap output = Bitmap.createBitmap(resultWidth, resultHeight, Bitmap.Config.RGB_565);
    Utils.matToBitmap(outputMat, output);
    return output;
  }
Example #5
0
  /**
   * Projects a 3D coordinate axis of length {@code height} onto {@code frame} using the given
   * pose (Rvec/Tvec) and camera parameters, then draws the three axes and labels their tips
   * with "X", "Y" and "Z".
   */
  public static void draw3dAxis(
      Mat frame, CameraParameters cp, Scalar color, double height, Mat Rvec, Mat Tvec) {
    // Object-space points: the origin plus the tip of each axis.
    MatOfPoint3f axisPoints = new MatOfPoint3f();
    List<Point3> tips = new Vector<Point3>();
    tips.add(new Point3(0, 0, 0));
    tips.add(new Point3(height, 0, 0));
    tips.add(new Point3(0, height, 0));
    tips.add(new Point3(0, 0, height));
    axisPoints.fromList(tips);

    // Project into image coordinates with the camera's intrinsics and distortion.
    MatOfPoint2f projected = new MatOfPoint2f();
    Calib3d.projectPoints(
        axisPoints, Rvec, Tvec, cp.getCameraMatrix(), cp.getDistCoeff(), projected);
    List<Point> pts = new Vector<Point>();
    Converters.Mat_to_vector_Point(projected, pts);

    // Draw the three axes from the projected origin, then label each tip.
    for (int i = 1; i <= 3; i++) {
      Core.line(frame, pts.get(0), pts.get(i), color, 2);
    }
    String[] labels = {"X", "Y", "Z"};
    for (int i = 1; i <= 3; i++) {
      Core.putText(frame, labels[i - 1], pts.get(i), Core.FONT_HERSHEY_SIMPLEX, 0.5, color, 2);
    }
  }
Example #6
0
  /**
   * Warps the quadrilateral described by {@code startM} (four source corners packed via
   * {@code Converters.vector_Point2f_to_Mat}) onto an upright (400*factor)x(240*factor)
   * rectangle, blurs, downscales by {@code factor} and thresholds the result.
   *
   * @param inputMat source image
   * @param startM Mat with the four source-quad corners; assumed to be in the same order as the
   *     destination corners below (TL, BL, BR, TR) — confirm against callers
   * @param factor supersampling factor; the returned image is always 400x240
   * @return the rectified, blurred, thresholded image
   */
  public static Mat warp(Mat inputMat, Mat startM, int factor) {

    int resultWidth = 400 * factor;
    int resultHeight = 240 * factor;

    // warpPerspective allocates/resizes its destination to dsize itself, so no presizing is
    // needed. (The original presized with new Mat(width, height, ...), which swaps the
    // Mat(rows, cols, type) constructor arguments.)
    Mat outputMat = new Mat();

    // Destination corners: the full output rectangle (TL, BL, BR, TR).
    List<Point> dest = new ArrayList<Point>();
    dest.add(new Point(0, 0));
    dest.add(new Point(0, resultHeight));
    dest.add(new Point(resultWidth, resultHeight));
    dest.add(new Point(resultWidth, 0));
    Mat endM = Converters.vector_Point2f_to_Mat(dest);

    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);

    Imgproc.warpPerspective(
        inputMat,
        outputMat,
        perspectiveTransform,
        new Size(resultWidth, resultHeight),
        Imgproc.INTER_AREA);
    // Smooth before downscaling to reduce aliasing.
    Imgproc.GaussianBlur(outputMat, outputMat, new Size(5, 5), 5);
    Imgproc.resize(outputMat, outputMat, new Size(resultWidth / factor, resultHeight / factor));

    // Keep bright pixels; zero everything at or below 127.
    Imgproc.threshold(outputMat, outputMat, 127, 255, Imgproc.THRESH_TOZERO);
    return outputMat;
  }
Example #7
0
 // javadoc: buildOpticalFlowPyramid(img, pyramid, winSize, maxLevel, withDerivatives, pyrBorder,
 // derivBorder, tryReuseInputImage)
 public static int buildOpticalFlowPyramid(
     Mat img,
     List<Mat> pyramid,
     Size winSize,
     int maxLevel,
     boolean withDerivatives,
     int pyrBorder,
     int derivBorder,
     boolean tryReuseInputImage) {
   // The native side fills one packed Mat; unpack it into the caller's list afterwards.
   Mat packedPyramid = new Mat();
   int levels =
       buildOpticalFlowPyramid_0(
           img.nativeObj,
           packedPyramid.nativeObj,
           winSize.width,
           winSize.height,
           maxLevel,
           withDerivatives,
           pyrBorder,
           derivBorder,
           tryReuseInputImage);
   Converters.Mat_to_vector_Mat(packedPyramid, pyramid);
   // Free the temporary packed Mat eagerly rather than waiting for the finalizer.
   packedPyramid.release();
   return levels;
 }
Example #8
0
 /**
  * Constructs the image pyramid which can be passed to "calcOpticalFlowPyrLK".
  *
  * @param img 8-bit input image.
  * @param pyramid output pyramid.
  * @param winSize window size of optical flow algorithm. Must be not less than <code>winSize
  *     </code> argument of "calcOpticalFlowPyrLK". It is needed to calculate required padding for
  *     pyramid levels.
  * @param maxLevel 0-based maximal pyramid level number.
  * @see <a
  *     href="http://docs.opencv.org/modules/video/doc/motion_analysis_and_object_tracking.html#buildopticalflowpyramid">org.opencv.video.Video.buildOpticalFlowPyramid</a>
  */
 public static int buildOpticalFlowPyramid(
     Mat img, List<Mat> pyramid, Size winSize, int maxLevel) {
   Mat pyramid_mat = new Mat();
   int retVal =
       buildOpticalFlowPyramid_1(
           img.nativeObj, pyramid_mat.nativeObj, winSize.width, winSize.height, maxLevel);
   Converters.Mat_to_vector_Mat(pyramid_mat, pyramid);
   // Release the temporary packed Mat, matching the full-argument overload, instead of
   // leaving native memory to the finalizer.
   pyramid_mat.release();
   return retVal;
 }
Example #9
0
 // javadoc: Subdiv2D::getVoronoiFacetList(idx, facetList, facetCenters)
 public void getVoronoiFacetList(
     MatOfInt idx, List<MatOfPoint2f> facetList, MatOfPoint2f facetCenters) {
   // Temporary Mat the native side packs the facet polygons into.
   Mat packedFacets = new Mat();
   getVoronoiFacetList_0(
       nativeObj, idx.nativeObj, packedFacets.nativeObj, facetCenters.nativeObj);
   Converters.Mat_to_vector_vector_Point2f(packedFacets, facetList);
   packedFacets.release();
 }
Example #10
0
  // Stores the given Mat list under the parameter name on the native object.
  public void setMatVector(String name, List<Mat> value) {
    // Pack the list into one Mat for the JNI boundary.
    Mat packedValue = Converters.vector_Mat_to_Mat(value);
    setMatVector_0(nativeObj, name, packedValue.nativeObj);
  }
Example #11
0
 /**
  * Reads the Mat-vector parameter {@code name} from the native object.
  *
  * @param name parameter name
  * @return the stored Mats unpacked into a Java list
  */
 public List<Mat> getMatVector(String name) {
   List<Mat> retVal = new ArrayList<Mat>();
   Mat retValMat = new Mat(getMatVector_0(nativeObj, name));
   Converters.Mat_to_vector_Mat(retValMat, retVal);
   // Release the temporary packed Mat eagerly (consistent with the other
   // vector-returning wrappers in this file) instead of waiting for the finalizer.
   retValMat.release();
   return retVal;
 }
Example #12
0
  /**
   * Finds the largest quadrilateral contour in {@code imgSource}, warps it to an upright
   * rectangle via {@code warp} and writes the result to the {@code output} path.
   *
   * <p>Note: mutates {@code imgSource} in place (grayscale conversion, edge detection, blur).
   *
   * @param imgSource source image (BGR); modified by this method
   * @throws IllegalArgumentException if no quadrilateral contour is found
   */
  public static void getSquare(Mat imgSource) {
    // Keep an untouched copy for the final warp; the pipeline below destroys imgSource.
    Mat sourceImage = imgSource.clone();
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
    // Edge detection, then blur to merge dotted edge fragments into continuous lines.
    Imgproc.Canny(imgSource, imgSource, 50, 50);
    Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);

    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(
        imgSource, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

    // Pick the largest-area contour whose polygonal approximation has exactly 4 corners.
    double maxArea = -1;
    MatOfPoint2f approxCurve = null;
    for (MatOfPoint contour : contours) {
      double contourArea = Imgproc.contourArea(contour);
      if (contourArea > maxArea) {
        MatOfPoint2f curve = new MatOfPoint2f(contour.toArray());
        MatOfPoint2f approx = new MatOfPoint2f();
        // Approximation tolerance proportional to the contour size (5%).
        int contourSize = (int) contour.total();
        Imgproc.approxPolyDP(curve, approx, contourSize * 0.05, true);
        if (approx.total() == 4) {
          maxArea = contourArea;
          approxCurve = approx;
        }
      }
    }
    if (approxCurve == null) {
      // The original crashed with an unhelpful NullPointerException / IndexOutOfBoundsException
      // here (e.g. on an empty contour list); fail explicitly instead.
      throw new IllegalArgumentException("no quadrilateral contour found in image");
    }

    // NOTE(review): COLOR_BayerBG2RGB on a Canny edge image looks wrong (COLOR_GRAY2RGB was
    // probably intended) — kept as-is to preserve the original's observable behavior.
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);

    // Extract the four approximated corners and order them via getCorners.
    double[] c0 = approxCurve.get(0, 0);
    double[] c1 = approxCurve.get(1, 0);
    double[] c2 = approxCurve.get(2, 0);
    double[] c3 = approxCurve.get(3, 0);
    List<Point> source =
        getCorners(
            new Point(c0[0], c0[1]),
            new Point(c1[0], c1[1]),
            new Point(c2[0], c2[1]),
            new Point(c3[0], c3[1]));
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Mat result = warp(sourceImage, startM, 5);
    // 'output' is a field/constant declared elsewhere in this class (target file path).
    Highgui.imwrite(output, result);
  }