PerspectiveTransform and Crop Rectangle in Android using OpenCV

I receive an image from the Android camera using CvCameraViewListener2.
My program detects a yellow rectangular plate.
The problem is that after I know the 4 corner points of the rectangle, I want to crop the rectangle and show it in the JavaCameraView.
Here is a screenshot of my program when it detects a rectangle.
First, I apply perspectiveTransform to warp the image into the correct perspective, but the result is very bad.
I have no idea how to do this correctly.
The following is my code:
private Mat getTransform(Mat inputFrame, List<Point> corners) {
Mat outputFrame = inputFrame.clone();
//compute width of new image
double widthA = Math.sqrt(Math.pow(corners.get(3).x - corners.get(2).x, 2) + Math.pow(corners.get(3).y - corners.get(2).y, 2));
double widthB = Math.sqrt(Math.pow(corners.get(1).x - corners.get(0).x, 2) + Math.pow(corners.get(1).y - corners.get(0).y, 2));
double maxWidth = max(widthA, widthB);
// compute heigh of new image
double heightA = Math.sqrt(Math.pow(corners.get(1).x - corners.get(3).x, 2) + Math.pow(corners.get(1).y - corners.get(3).y, 2));
double heightB = Math.sqrt(Math.pow(corners.get(0).x - corners.get(2).x, 2) + Math.pow(corners.get(0).y - corners.get(2).y, 2));
double maxHeight = max(heightA, heightB);
Log.d(MAINTAG, "Width: " + maxWidth + ", Height: " + maxHeight);
List<Point> output = new ArrayList<>();
output.add(new Point(0, 0));
output.add(new Point(0, maxHeight));
output.add(new Point(maxWidth, maxHeight));
output.add(new Point(maxWidth, 0));
Mat inputMat = Converters.vector_Point2f_to_Mat(corners);
Mat outputMat = Converters.vector_Point2f_to_Mat(output);
Mat outputTransform = Imgproc.getPerspectiveTransform(inputMat, outputMat);
Imgproc.warpPerspective(inputFrame, outputFrame, outputTransform, new Size(outputMat.width(), outputMat.height()), Imgproc.INTER_LINEAR);
return outputFrame;
}
The corners list is ordered topLeft, topRight, bottomLeft, bottomRight.
After correcting the perspective, the screen freezes!
Second, I want to crop the rectangle after correcting its perspective and show it in the JavaCameraView. How do I do that?
Thank you for any solutions and suggestions.
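A minimal sketch of what a corrected transform step could look like, assuming the topLeft, topRight, bottomLeft, bottomRight ordering described above. Two likely culprits in the code above are that the destination points are not listed in the same order as the source corners, and that the warp size is taken from the 4x1 point Mat instead of from maxWidth and maxHeight:
// destination points in the SAME order as the source corners (TL, TR, BL, BR)
List<Point> output = new ArrayList<>();
output.add(new Point(0, 0));                          // topLeft
output.add(new Point(maxWidth - 1, 0));               // topRight
output.add(new Point(0, maxHeight - 1));              // bottomLeft
output.add(new Point(maxWidth - 1, maxHeight - 1));   // bottomRight
Mat src = Converters.vector_Point2f_to_Mat(corners);
Mat dstPoints = Converters.vector_Point2f_to_Mat(output);
Mat transform = Imgproc.getPerspectiveTransform(src, dstPoints);
Mat warped = new Mat();
// the output size is the size of the rectified plate, not the size of the point Mat
Imgproc.warpPerspective(inputFrame, warped, transform, new Size(maxWidth, maxHeight), Imgproc.INTER_LINEAR);
The warped Mat is already the cropped plate; to show it inside the JavaCameraView it could be resized and copied into a submat of the preview frame rather than returned as the full frame.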

Related

Overlay an image over live frame - OpenCV4Android

I'm trying to overlay a small image, previously selected by the user, on the live frame. I already have the image's path from a previous activity. My problem is that I am not able to show the image on the frame.
I'm trying to detect a rectangle on the frame and display the selected image over that rectangle. I can detect the rectangle, but now I can't display the image on any part of the frame (I don't care about the rectangle right now).
I've been trying to do it with the explanations from Adding Image Overlay OpenCV for Android and add watermark small image to large image opencv4android, but it didn't work for me.
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Mat gray = inputFrame.gray();
Mat dst = inputFrame.rgba();
Imgproc.pyrDown(gray, dsIMG, new Size(gray.cols() / 2, gray.rows() / 2));
Imgproc.pyrUp(dsIMG, usIMG, gray.size());
Imgproc.Canny(usIMG, bwIMG, 0, threshold);
Imgproc.dilate(bwIMG, bwIMG, new Mat(), new Point(-1, 1), 1);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
cIMG = bwIMG.clone();
Imgproc.findContours(cIMG, contours, hovIMG, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
for (MatOfPoint cnt : contours) {
MatOfPoint2f curve = new MatOfPoint2f(cnt.toArray());
Imgproc.approxPolyDP(curve, approxCurve, 0.02 * Imgproc.arcLength(curve, true), true);
int numberVertices = (int) approxCurve.total();
double contourArea = Imgproc.contourArea(cnt);
if (Math.abs(contourArea) < 100) {
continue;
}
//Rectangle detected
if (numberVertices >= 4 && numberVertices <= 6) {
List<Double> cos = new ArrayList<>();
for (int j = 2; j < numberVertices + 1; j++) {
cos.add(angle(approxCurve.toArray()[j % numberVertices], approxCurve.toArray()[j - 2], approxCurve.toArray()[j - 1]));
}
Collections.sort(cos);
double mincos = cos.get(0);
double maxcos = cos.get(cos.size() - 1);
if (numberVertices == 4 && mincos >= -0.3 && maxcos <= 0.5) {
//Small watermark image
Mat a = imread(img_path);
Mat bSubmat = dst.submat(0, dst.rows() -1 , 0, dst.cols()-1);
a.copyTo(bSubmat);
}
}
}
return dst;
}
NOTE: img_path is the path of the selected image I want to display over the frame. I got it from the previous activity.
For now, I just want to display the image over the frame. Later, I will try to display it in the same position where the rectangle was found.
Any suggestion or recommendation is welcome, as I am new to OpenCV. I'm sorry for my English, but feel free to ask about anything I didn't explain correctly; I'll do my best to explain it better.
Thanks a lot!
If you just want to display the image as an overlay, and not save it as part of the video, you may find it easier to simply display it in a separate view above the video view. This will likely use less processing and battery as well.
If you want to draw onto the camera image bitmap, the following will allow you to do that:
// bytes is the camera byte array and opt the BitmapFactory.Options from your capture callback
Bitmap cameraBitmap = BitmapFactory.decodeByteArray(bytes, 0, bytes.length, opt);
Canvas camImgCanvas = new Canvas(cameraBitmap);
Drawable d = ContextCompat.getDrawable(getActivity(), R.drawable.myDrawable);
//Centre the drawing
int bitMapWidthCenter = cameraBitmap.getWidth()/2;
int bitMapheightCenter = cameraBitmap.getHeight()/2;
d.setBounds(bitMapWidthCenter, bitMapheightCenter, bitMapWidthCenter+d.getIntrinsicWidth(),
bitMapheightCenter+d.getIntrinsicHeight());
//And draw it...
d.draw(camImgCanvas);
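If the overlay should instead stay on the OpenCV side, as the question's code attempts, a rough sketch could look like the following; it assumes the frame dst is RGBA, the watermark loads as 3-channel BGR, and the 100x100 size and top-left position are arbitrary choices:
Mat watermark = Imgcodecs.imread(img_path);                      // Highgui.imread on OpenCV 2.4.x
Imgproc.cvtColor(watermark, watermark, Imgproc.COLOR_BGR2RGBA);  // match the RGBA frame type
Imgproc.resize(watermark, watermark, new Size(100, 100));        // make sure it fits inside the frame
Mat roi = dst.submat(0, watermark.rows(), 0, watermark.cols());  // top-left corner of the frame
watermark.copyTo(roi);                                           // sizes and types must match for the pixels to land in dst
A likely reason the original copyTo showed nothing is that copying into a submat only writes into the parent frame when the sizes and types match; otherwise the destination is reallocated and detached from dst.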

Hough circle doesn't detect the eye's iris

I want to detect the irises of the eyes and their centers using the Hough circle algorithm.
I'm using this code:
private void houghCircle()
{
Bitmap obtainedBitmap = imagesList.getFirst();
/* convert bitmap to mat */
Mat mat = new Mat(obtainedBitmap.getWidth(),obtainedBitmap.getHeight(),
CvType.CV_8UC1);
Mat grayMat = new Mat(obtainedBitmap.getWidth(), obtainedBitmap.getHeight(),
CvType.CV_8UC1);
Utils.bitmapToMat(obtainedBitmap, mat);
/* convert to grayscale */
int colorChannels = (mat.channels() == 3) ? Imgproc.COLOR_BGR2GRAY : ((mat.channels() == 4) ? Imgproc.COLOR_BGRA2GRAY : 1);
Imgproc.cvtColor(mat, grayMat, colorChannels);
/* reduce the noise so we avoid false circle detection */
Imgproc.GaussianBlur(grayMat, grayMat, new Size(9, 9), 2, 2);
// accumulator value
double dp = 1.2d;
// minimum distance between the center coordinates of detected circles in pixels
double minDist = 100;
// min and max radii (set these values as you desire)
int minRadius = 0, maxRadius = 1000;
// param1 = gradient value used to handle edge detection
// param2 = Accumulator threshold value for the
// cv2.CV_HOUGH_GRADIENT method.
// The smaller the threshold is, the more circles will be
// detected (including false circles).
// The larger the threshold is, the more circles will
// potentially be returned.
double param1 = 70, param2 = 72;
/* create a Mat object to store the circles detected */
Mat circles = new Mat(obtainedBitmap.getWidth(), obtainedBitmap.getHeight(), CvType.CV_8UC1);
/* find the circle in the image */
Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius, maxRadius);
/* get the number of circles detected */
int numberOfCircles = (circles.rows() == 0) ? 0 : circles.cols();
/* draw the circles found on the image */
for (int i=0; i<numberOfCircles; i++) {
/* get the circle details, circleCoordinates[0, 1, 2] = (x,y,r)
* (x,y) are the coordinates of the circle's center
*/
double[] circleCoordinates = circles.get(0, i);
int x = (int) circleCoordinates[0], y = (int) circleCoordinates[1];
Point center = new Point(x, y);
int radius = (int) circleCoordinates[2];
/* circle's outline */
Core.circle(mat, center, radius, new Scalar(0,
255, 0), 4);
/* circle's center outline */
Core.rectangle(mat, new Point(x - 5, y - 5),
new Point(x + 5, y + 5),
new Scalar(0, 128, 255), -1);
}
/* convert back to bitmap */
Utils.matToBitmap(mat, obtainedBitmap);
MediaStore.Images.Media.insertImage(getContentResolver(),obtainedBitmap, "testgray", "gray" );
}
But it doesn't detect the iris correctly in all images, especially if the iris has a dark color like brown. How can I fix this code to detect the irises and their centers correctly?
EDIT: Here are some sample images (which I got from the web) that show the performance of the algorithm (please ignore the landmarks, which are represented by the red squares):
In these images the algorithm doesn't detect all the irises:
This image shows how the algorithm couldn't detect any irises at all:
EDIT 2: Here is code that uses Canny edge detection, but it causes the app to crash:
private void houghCircle()
{
Mat grayMat = new Mat();
Mat cannyEdges = new Mat();
Mat circles = new Mat();
Bitmap obtainedBitmap = imagesList.getFirst();
/* convert bitmap to mat */
Mat originalBitmap = new Mat(obtainedBitmap.getWidth(),obtainedBitmap.getHeight(),
CvType.CV_8UC1);
//Converting the image to grayscale
Imgproc.cvtColor(originalBitmap,grayMat,Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(grayMat, cannyEdges,10, 100);
Imgproc.HoughCircles(cannyEdges, circles,
Imgproc.CV_HOUGH_GRADIENT,1, cannyEdges.rows() / 15); //now circles is filled with detected circles.
//, grayMat.rows() / 8);
Mat houghCircles = new Mat();
houghCircles.create(cannyEdges.rows(),cannyEdges.cols()
,CvType.CV_8UC1);
//Drawing lines on the image
for(int i = 0 ; i < circles.cols() ; i++)
{
double[] parameters = circles.get(0,i);
double x, y;
int r;
x = parameters[0];
y = parameters[1];
r = (int)parameters[2];
Point center = new Point(x, y);
//Drawing circles on an image
Core.circle(houghCircles,center,r,
new Scalar(255,0,0),1);
}
//Converting Mat back to Bitmap
Utils.matToBitmap(houghCircles, obtainedBitmap);
MediaStore.Images.Media.insertImage(getContentResolver(),obtainedBitmap, "testgray", "gray" );
}
This is the error I get in the log
FATAL EXCEPTION: Thread-28685
CvException [org.opencv.core.CvException: cv::Exception: /hdd2/buildbot/slaves/slave_ardbeg1/50-SDK/opencv/modules/imgproc/src/color.cpp:3739: error: (-215) scn == 3 || scn == 4 in function void cv::cvtColor(cv::InputArray, cv::OutputArray, int, int)
]
at org.opencv.imgproc.Imgproc.cvtColor_1(Native Method)
at org.opencv.imgproc.Imgproc.cvtColor(Imgproc.java:4598)
Which is caused by this line: Imgproc.cvtColor(originalBitmap,grayMat,Imgproc.COLOR_BGR2GRAY);
Can anyone please tell me how this error can be solved? Perhaps adding Canny edge detection will improve the results.
Hough circles works better on well-defined circles; it is not good with things like the iris.
After some thresholding, morphological operations or Canny edge detection, feature detection methods like MSER work much better for iris detection.
Here is a similar question with a solution if you are looking for some code.
Since you want to detect the iris using the Hough transform (there are others), you had better study the Canny edge detector and its parameters.
cv::HoughCircles takes the Canny hysteresis threshold in param1. Investigating Canny alone gives you a feel for a good threshold range.
Maybe instead of a Gaussian blur, apply a better denoising (non-local means with, say, h=32 and window sizes 5 and 15), and also try to harmonize the image contrast, e.g. using contrast limited adaptive histogram equalization (cv::CLAHE).
Harmonization is to make sure all (highlight and shadow) eyes map to a similar intensity range.
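A possible preprocessing sketch along those lines with the OpenCV Java API (a rough illustration only; h=32 and the 5/15 window sizes come from the suggestion above, the CLAHE parameters are generic defaults, not values tuned for irises):
// requires org.opencv.photo.Photo and org.opencv.imgproc.CLAHE
Mat denoised = new Mat();
Photo.fastNlMeansDenoising(grayMat, denoised, 32, 5, 15);   // non-local means instead of GaussianBlur
CLAHE clahe = Imgproc.createCLAHE(2.0, new Size(8, 8));     // contrast limited adaptive histogram equalization
Mat equalized = new Mat();
clahe.apply(denoised, equalized);
Imgproc.HoughCircles(equalized, circles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius, maxRadius);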
I wanted to know whether those images are the ones you actually processed, or whether you took a cell phone snapshot of your screen to upload them here, because the irises are bigger than the maximum radius you set in your code. Therefore I don't understand how you could find any iris at all. The irises in the first image have a radius of over 20, so you shouldn't be able to detect them.
You should set the radii to the radius range you expect your irises to be in.
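For example, tying the radii to the image size instead of using fixed constants (the divisors here are purely illustrative assumptions):
int minRadius = grayMat.cols() / 30;   // rough lower bound for an iris in a face-sized image
int maxRadius = grayMat.cols() / 8;    // rough upper bound
Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius, maxRadius);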

openCV4android detect Shape and Color (HSV)

I'm a beginner in OpenCV4Android and I would like to get some help if possible.
I'm trying to detect colored triangles, squares or circles using my Android phone's camera, but I don't know where to start.
I have been reading the O'Reilly Learning OpenCV book and I have gained some knowledge about OpenCV.
Here is what I want to do:
1- Get the tracking color (just the HSV color) of the object by touching the screen - I have already done this using the color blob example from the OpenCV4Android samples.
2- Find shapes like triangles, squares or circles on the camera preview, based on the color chosen before.
I have only found examples of finding shapes within an image. What I would like to do is find them with the camera in real time.
Any help would be appreciated.
Best regards and have a nice day.
If you plan to use the NDK for your OpenCV processing, you can use the same idea they use in the OpenCV tutorial 2-Mixedprocessing sample.
// on camera frames call your native method
public Mat onCameraFrame(CvCameraViewFrame inputFrame)
{
mRgba = inputFrame.rgba();
Nativecleshpdetect(mRgba.getNativeObjAddr()); // native method call to perform color and object detection
// getNativeObjAddr passes the address of the Mat holding the camera frame to the native side as a long,
// so you don't have to create and destroy a Mat object on each frame
return mRgba;
}
public native void Nativecleshpdetect(long matAddrRgba);
In Native side
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_Nativecleshpdetect(JNIEnv*, jobject,jlong addrRgba1)
{
Mat& mRgb1 = *(Mat*)addrRgba1;
// mRgb1 is a mat object which points to the address of the input camera frame, so all the manipulations you do here will reflect on the live camera frame
//once you have your mat object(i.e mRgb1 ) you can implement all the colour and shape detection algorithm you have learnt in opencv book
}
Since all manipulations are done through pointers, you have to be a bit careful handling them. Hope this helps.
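One detail not shown above: the native library has to be loaded before Nativecleshpdetect can be called, for example with a static initializer (the library name depends on your Android.mk/CMake module; "mixed_sample" is only a placeholder):
static {
System.loadLibrary("mixed_sample");   // hypothetical module name
}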
Why don't you make use of JavaCV? I think it's a better alternative; you don't have to use the NDK at all for this.
try this:
http://code.google.com/p/javacv/
If you check OpenCV's Back Projection tutorial, it does what you are looking for (and a bit more).
Back Projection:
"In terms of statistics, the values stored in the BackProjection matrix represent the probability that a pixel in an image belongs to the region with the selected color."
I have converted that tutorial to OpenCV4Android (2.4.8), like you were looking for; it does not use the Android NDK. You can see all the code here at Github.
You can also check this answer for more details.
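For reference, a very condensed sketch of the back-projection idea in the OpenCV Java API (the hue-only histogram and its size are illustrative assumptions, and sampleHsv stands for an HSV crop of the user-selected region):
Mat hsv = new Mat();
Imgproc.cvtColor(mRgba, hsv, Imgproc.COLOR_RGB2HSV_FULL);
Mat hist = new Mat();
MatOfInt channels = new MatOfInt(0);                 // hue channel only
MatOfInt histSize = new MatOfInt(32);
MatOfFloat ranges = new MatOfFloat(0f, 256f);
Imgproc.calcHist(java.util.Arrays.asList(sampleHsv), channels, new Mat(), hist, histSize, ranges);
Core.normalize(hist, hist, 0, 255, Core.NORM_MINMAX);
Mat backProj = new Mat();
Imgproc.calcBackProject(java.util.Arrays.asList(hsv), channels, hist, backProj, ranges, 1);
// backProj is a grayscale probability map; threshold it and find contours to locate the object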
Though it's a bit late, I would like to make a contribution to the question.
1- Get the tracking color (just the color HSV) of the object by
touching the screen - I have already done this by using the color blob
example from the OpenCV4android example
Implement OnTouchListener in your activity; in the onTouch function:
int cols = mRgba.cols();
int rows = mRgba.rows();
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2;
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2;
int x = (int) event.getX() - xOffset;
int y = (int) event.getY() - yOffset;
Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
Rect touchedRect = new Rect();
touchedRect.x = (x > 4) ? x - 4 : 0;
touchedRect.y = (y > 4) ? y - 4 : 0;
touchedRect.width = (x + 4 < cols) ? x + 4 - touchedRect.x : cols - touchedRect.x;
touchedRect.height = (y + 4 < rows) ? y + 4 - touchedRect.y : rows - touchedRect.y;
Mat touchedRegionRgba = mRgba.submat(touchedRect);
Mat touchedRegionHsv = new Mat();
Imgproc.cvtColor(touchedRegionRgba, touchedRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);
// Calculate average color of touched region
mBlobColorHsv = Core.sumElems(touchedRegionHsv);
int pointCount = touchedRect.width * touchedRect.height;
for (int i = 0; i < mBlobColorHsv.val.length; i++)
mBlobColorHsv.val[i] /= pointCount;
mBlobColorRgba = converScalarHsv2Rgba(mBlobColorHsv);
mColor = mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] + ", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3];
Log.i(TAG, "Touched rgba color: (" + mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] +
", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3] + ")");
mRgba is a Mat object which was initialized in onCameraViewStarted as
mRgba = new Mat(height, width, CvType.CV_8UC4);
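One possible way to turn the averaged HSV color into a binary mask of matching pixels (the +-10 hue tolerance and the saturation/value floors are arbitrary assumptions, and hue wrap-around is ignored for simplicity):
Scalar lower = new Scalar(mBlobColorHsv.val[0] - 10, 50, 50);
Scalar upper = new Scalar(mBlobColorHsv.val[0] + 10, 255, 255);
Mat hsvFrame = new Mat();
Imgproc.cvtColor(mRgba, hsvFrame, Imgproc.COLOR_RGB2HSV_FULL);
Mat mask = new Mat();
Core.inRange(hsvFrame, lower, upper, mask);   // white where the color matches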
And for the 2nd part:
2- Find on the camera shapes like triangles, squares or circles based
on the color choosed before.
I have tried to find out the shape of the selected contour using approxPolyDP:
MatOfPoint2f contour2f = new MatOfPoint2f(contours.get(0).toArray());
//Processing on mMOP2f1 which is in type MatOfPoint2f
double approxDistance = Imgproc.arcLength(contour2f, true) * 0.02;
Imgproc.approxPolyDP(contour2f, approxCurve, approxDistance, true);
//Convert back to MatOfPoint
MatOfPoint points = new MatOfPoint(approxCurve.toArray());
System.out.println("points length" + points.toArray().length);
if( points.toArray().length == 5)
{
System.out.println("Pentagon");
mShape = "Pentagon";
}
else if(points.toArray().length > 5)
{
System.out.println("Circle");
Imgproc.drawContours(mRgba, contours, 0, new Scalar(255, 255, 0, -1));
mShape = "Circle";
}
else if(points.toArray().length == 4)
{
System.out.println("Square");
mShape = "Square";
}
else if(points.toArray().length == 3)
{
System.out.println("Triangle");
mShape = "Triangle";
}
This was done in the onCameraFrame function after I obtained the contour list.
For me, if the length of the point array was more than 5, it was usually a circle. But there are other algorithms to obtain a circle and its attributes.
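Putting the two parts together, a rough end-to-end sketch could run the shape test only on contours of the selected color, e.g. on the mask from the sketch further above (the area threshold and vertex counts are assumptions in the same spirit as the code above):
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(mask, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
for (MatOfPoint contour : contours) {
if (Imgproc.contourArea(contour) < 100) continue;            // skip noise
MatOfPoint2f curve = new MatOfPoint2f(contour.toArray());
MatOfPoint2f approx = new MatOfPoint2f();
Imgproc.approxPolyDP(curve, approx, 0.02 * Imgproc.arcLength(curve, true), true);
long vertices = approx.total();
if (vertices == 3) mShape = "Triangle";
else if (vertices == 4) mShape = "Square";
else if (vertices > 6) mShape = "Circle";
}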

Android OpenCV Find Largest Square or Rectangle

This might have been answered, but I desperately need an answer for this. I want to find the largest square or rectangle in an image using OpenCV in Android. All of the solutions that I found are in C++, and I tried converting them, but it doesn't work and I don't know where I'm wrong.
private Mat findLargestRectangle(Mat original_image) {
Mat imgSource = original_image;
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(imgSource, imgSource, 100, 100);
//I don't know what to do in here
return imgSource;
}
What I am trying to accomplish here is to create a new image based on the largest square found in the original image (the returned Mat).
This is what I want to happen:
(example image: http://img14.imageshack.us/img14/7855/s7zr.jpg)
It's also okay if I just get the four points of the largest square; I think I can take it from there. But it would be better if I could just return the cropped image.
After Canny:
1- reduce noise with a Gaussian blur and find all the contours.
2- find and list all the contour areas.
3- the largest contour will be nothing but the painting.
4- now use a perspective transformation to warp that shape into a rectangle.
Check sudoku solver examples to see a similar processing problem (largest contour + perspective).
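A compact sketch of steps 1-3 (blur, contours, largest area); the parameters are just examples and the color conversion follows the question's code:
Mat gray = new Mat();
Imgproc.cvtColor(original_image, gray, Imgproc.COLOR_BGR2GRAY);
Imgproc.GaussianBlur(gray, gray, new Size(5, 5), 0);
Imgproc.Canny(gray, gray, 50, 150);
List<MatOfPoint> contours = new ArrayList<>();
Imgproc.findContours(gray, contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint largest = null;
double maxArea = 0;
for (MatOfPoint c : contours) {
double area = Math.abs(Imgproc.contourArea(c));
if (area > maxArea) { maxArea = area; largest = c; }
}
// 'largest' is then approximated to 4 corners and fed to getPerspectiveTransform (step 4)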
Took me a while to convert the C++ code to Java, but here it is :-)
Warning ! Raw code, totally not optimized and all.
I decline any liability in cases of injury or lethal accident
List<MatOfPoint> squares = new ArrayList<MatOfPoint>();
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
if (Math.random()>0.80) {
findSquares(inputFrame.rgba().clone(),squares);
}
Mat image = inputFrame.rgba();
Imgproc.drawContours(image, squares, -1, new Scalar(0,0,255));
return image;
}
int thresh = 50, N = 11;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( Point pt1, Point pt2, Point pt0 ) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/Math.sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
void findSquares( Mat image, List<MatOfPoint> squares )
{
squares.clear();
Mat smallerImg=new Mat(new Size(image.width()/2, image.height()/2),image.type());
Mat gray=new Mat(image.size(),image.type());
Mat gray0=new Mat(image.size(),CvType.CV_8U);
// down-scale and upscale the image to filter out the noise
Imgproc.pyrDown(image, smallerImg, smallerImg.size());
Imgproc.pyrUp(smallerImg, image, image.size());
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
extractChannel(image, gray, c);
// try several threshold levels
for( int l = 1; l < N; l++ )
{
//Cany removed... Didn't work so well
Imgproc.threshold(gray, gray0, (l+1)*255/N, 255, Imgproc.THRESH_BINARY);
List<MatOfPoint> contours=new ArrayList<MatOfPoint>();
// find contours and store them all as a list
Imgproc.findContours(gray0, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint approx=new MatOfPoint();
// test each contour
for( int i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approx = approxPolyDP(contours.get(i), Imgproc.arcLength(new MatOfPoint2f(contours.get(i).toArray()), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.toArray().length == 4 &&
Math.abs(Imgproc.contourArea(approx)) > 1000 &&
Imgproc.isContourConvex(approx) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = Math.abs(angle(approx.toArray()[j%4], approx.toArray()[j-2], approx.toArray()[j-1]));
maxCosine = Math.max(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degree) then write quandrange
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.add(approx);
}
}
}
}
}
void extractChannel(Mat source, Mat out, int channelNum) {
List<Mat> sourceChannels=new ArrayList<Mat>();
List<Mat> outChannel=new ArrayList<Mat>();
Core.split(source, sourceChannels);
outChannel.add(new Mat(sourceChannels.get(0).size(),sourceChannels.get(0).type()));
Core.mixChannels(sourceChannels, outChannel, new MatOfInt(channelNum,0));
Core.merge(outChannel, out);
}
MatOfPoint approxPolyDP(MatOfPoint curve, double epsilon, boolean closed) {
MatOfPoint2f tempMat=new MatOfPoint2f();
Imgproc.approxPolyDP(new MatOfPoint2f(curve.toArray()), tempMat, epsilon, closed);
return new MatOfPoint(tempMat.toArray());
}
There are some related questions here on SO. Check them out:
OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection
How do I recognize squares in this image?
There is also an example shipped with OpenCV:
https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/squares.cpp?rev=4079
Once you have the rectangle, you can align the picture by computing the homography with the rectangle corners and applying a perspective transform.
