// Imports for the OpenCV 2.4.x Android API this snippet targets
// (Core.line/Core.circle moved to Imgproc in 3.x).
import java.util.List;

import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfByte;
import org.opencv.core.MatOfFloat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.TermCriteria;
import org.opencv.imgproc.Imgproc;
import org.opencv.video.Video;

import android.util.Log;

public static void detectMotion(Mat src, Mat dst) {
    // Convert the incoming YUV frame to grayscale for flow computation.
    cvt_YUVtoGRAY(src, Sample1View.gray);

    if (!Sample1View.prevgray.empty()) {
        // Dense Farneback optical flow between the previous and current frames.
        Video.calcOpticalFlowFarneback(Sample1View.prevgray, Sample1View.gray,
                Sample1View.flow, 0.5, 3, 15, 3, 5, 1.2, 0);
        Imgproc.cvtColor(Sample1View.prevgray, dst, Imgproc.COLOR_GRAY2RGB);
        draw_opticflow(Sample1View.flow, dst, 36, new Scalar(0, 255, 0));
    }

    // Copy (not alias) the current frame. The original `prevgray = gray`
    // assignment made both fields reference the same Mat, so the next call
    // compared the frame against itself and produced no flow.
    Sample1View.gray.copyTo(Sample1View.prevgray);

    try {
        Thread.sleep(20); // throttle to at most ~50 fps
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
}
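// draw_opticflow() is called above but not defined in this snippet. Below is a
// minimal sketch of what it might look like, assuming `flow` is the CV_32FC2
// output of calcOpticalFlowFarneback and `step` is the sampling-grid spacing
// in pixels; the name matches the call site, but the body is an assumption,
// not the original implementation.
private static void draw_opticflow(Mat flow, Mat img, int step, Scalar color) {
    for (int y = 0; y < flow.rows(); y += step) {
        for (int x = 0; x < flow.cols(); x += step) {
            double[] fxy = flow.get(y, x); // per-pixel (dx, dy) displacement
            Point from = new Point(x, y);
            Point to = new Point(x + fxy[0], y + fxy[1]);
            Core.line(img, from, to, color);        // motion vector
            Core.circle(img, from, 2, color, -1);   // sample-point marker
        }
    }
}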
void hpTrack(Mat mRgba, WorkingHeadPose hp, Rect[] facesArray) {
    MatOfByte status = new MatOfByte();
    Mat prev = new Mat(mRgba.size(), CvType.CV_8UC1);
    Mat curr = new Mat(mRgba.size(), CvType.CV_8UC1);
    MatOfPoint2f tmpCorners = new MatOfPoint2f();
    MatOfFloat err = new MatOfFloat();
    int i, j, count;

    // Termination criteria for the pyramidal LK search. The type flags must be
    // set or epsilon/maxCount are ignored; this object is only consumed by the
    // long calcOpticalFlowPyrLK overload (explicit window size and pyramid
    // depth), not by the short overload actually called below.
    TermCriteria optical_flow_termination_criteria =
            new TermCriteria(TermCriteria.MAX_ITER + TermCriteria.EPS, 20, 0.3);

    Imgproc.cvtColor(hp.previousFrame, prev, Imgproc.COLOR_RGBA2GRAY, 0);
    Imgproc.cvtColor(mRgba, curr, Imgproc.COLOR_RGBA2GRAY, 0);

    // Track the previous corners into the current frame.
    // http://stackoverflow.com/questions/12561292/android-using-calcopticalflowpyrlk-with-matofpoint2f
    if (hp.previousCorners.total() > 0)
        Video.calcOpticalFlowPyrLK(prev, curr, hp.previousCorners, tmpCorners, status, err);

    List<Byte> statusList = status.toList();

    // Count the corners the LK tracker managed to follow.
    count = 0;
    for (i = 0; i < hp.cornerCount; i++) {
        if (i == hp.corners.total() || i >= statusList.size())
            break;
        if (statusList.get(i) == 1)
            count++;
    }

    // Compact hp.corners and hp.modelPoints so each surviving corner stays
    // paired with its model point. The original mutated the lists returned by
    // toList(), but in the OpenCV Java API toList() returns a copy, so those
    // writes never reached the underlying Mats; row-to-row copies do.
    // http://stackoverflow.com/questions/11273588/how-to-convert-matofpoint-to-matofpoint2f-in-opencv-java-api
    MatOfPoint2f corners2f = new MatOfPoint2f();
    hp.corners.convertTo(corners2f, CvType.CV_32FC2);
    for (i = 0, j = 0; i < hp.cornerCount; i++) {
        if (i == hp.corners.total() || i >= statusList.size())
            break;
        if (statusList.get(i) == 1) {
            tmpCorners.row(i).copyTo(corners2f.row(j));
            hp.modelPoints.row(i).copyTo(hp.modelPoints.row(j));
            j++;
        }
    }
    corners2f.convertTo(hp.corners, CvType.CV_32S);

    Log.i("CornerCount", "Reassigning " + count);
    hp.cornerCount = count;
}
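// A sketch of how hpTrack() might be driven once per camera frame. The
// WorkingHeadPose fields (previousFrame, previousCorners, corners, cornerCount)
// are inferred from their use above; seeding the tracker with
// goodFeaturesToTrack is an assumption about how the corner set gets
// initialized, not code from the original project.
void onFrame(Mat mRgba, WorkingHeadPose hp, Rect[] facesArray) {
    if (hp.previousFrame == null || hp.previousFrame.empty()) {
        // First frame: seed the tracker with strong corners.
        Mat gray = new Mat();
        Imgproc.cvtColor(mRgba, gray, Imgproc.COLOR_RGBA2GRAY, 0);
        MatOfPoint seeds = new MatOfPoint();
        Imgproc.goodFeaturesToTrack(gray, seeds, 50, 0.01, 10);
        seeds.convertTo(hp.previousCorners, CvType.CV_32FC2);
        hp.cornerCount = (int) hp.previousCorners.total();
        hp.previousFrame = mRgba.clone();
        return;
    }
    hpTrack(mRgba, hp, facesArray);
    mRgba.copyTo(hp.previousFrame); // cache this frame for the next call
    // Track from the corners that survived the compaction in hpTrack().
    hp.corners.convertTo(hp.previousCorners, CvType.CV_32FC2);
}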