Coin detection using Android OpenCV

I am trying to detect coins (circles) using OpenCV4Android.
So far I have tried two approaches.
1) Regular method:
// convert image to grayscale
Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_RGBA2GRAY);
// apply Gaussian Blur
Imgproc.GaussianBlur(mGray, mGray, sSize5, 2, 2);
iMinRadius = 20;
iMaxRadius = 400;
iAccumulator = 300;
iCannyUpperThreshold = 100;
//apply houghCircles
Imgproc.HoughCircles(mGray, mIntermediateMat, Imgproc.CV_HOUGH_GRADIENT, 2.0, mGray.rows() / 8,
iCannyUpperThreshold, iAccumulator, iMinRadius, iMaxRadius);
if (mIntermediateMat.cols() > 0)
for (int x = 0; x < Math.min(mIntermediateMat.cols(), 10); x++) {
double vCircle[] = mIntermediateMat.get(0,x);
if (vCircle == null)
break;
pt.x = Math.round(vCircle[0]);
pt.y = Math.round(vCircle[1]);
radius = (int)Math.round(vCircle[2]);
// draw the found circle
Core.circle(mRgba, pt, radius, colorRed, iLineThickness);
}
2) Sobel and then Hough Circles
// apply Gaussian Blur
Imgproc.GaussianBlur(mRgba, mRgba, sSize3, 2, 2,
Imgproc.BORDER_DEFAULT);
// / Convert it to grayscale
Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_RGBA2GRAY);
// / Gradient X
Imgproc.Sobel(mGray, grad_x, CvType.CV_16S, 1, 0, 3, scale, delta,
Imgproc.BORDER_DEFAULT);
Core.convertScaleAbs(grad_x, abs_grad_x);
// / Gradient Y
Imgproc.Sobel(mGray, grad_y, CvType.CV_16S, 0, 1, 3, scale, delta,
Imgproc.BORDER_DEFAULT);
Core.convertScaleAbs(grad_y, abs_grad_y);
// / Total Gradient (approximate)
Core.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0, grad);
iCannyUpperThreshold = 100;
Imgproc.HoughCircles(grad, mIntermediateMat,
Imgproc.CV_HOUGH_GRADIENT, 2.0, grad.rows() / 8,
iCannyUpperThreshold, iAccumulator, iMinRadius, iMaxRadius);
if (mIntermediateMat.cols() > 0)
for (int x = 0; x < Math.min(mIntermediateMat.cols(), 10); x++) {
double vCircle[] = mIntermediateMat.get(0, x);
if (vCircle == null)
break;
pt.x = Math.round(vCircle[0]);
pt.y = Math.round(vCircle[1]);
radius = (int) Math.round(vCircle[2]);
// draw the found circle
Core.circle(mRgba, pt, radius, colorRed, iLineThickness);
}
Method one gives a fair result for coin detection, and method two gives a better one.
Of the two, the second method is slower to process, but its results are good.
Both of these methods work when the camera frame is captured using JavaCameraView or NativeCameraView from the OpenCV library.
If I use the same procedure on an image captured from the Android native image-capture intent, which returns a Bitmap, I get no results at all, i.e. no circles are detected.
With method one I sometimes get a circle detected when using a Bitmap captured via the Android camera intent.
I also tried changing the captured bitmap as suggested in this post, but still no circles are detected.
Can anybody tell me what modifications I have to make?
I would also like to know which algorithm gives better results for coin (circle) detection with less processing.
I have played with various values for the HoughCircles method, and also tried feeding the Canny edge output into HoughCircles, but the results are not considerably better.
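For the Bitmap case, a minimal sketch of the conversion step (assuming the intent returns an ARGB_8888-compatible Bitmap and the usual org.opencv.android.Utils / Imgproc imports; the 800 px target width is an arbitrary assumption, not a value from the question). Camera-intent photos are typically several times larger than preview frames, so without downscaling, radius and accumulator values tuned on preview frames no longer match:
// Sketch only: convert a camera-intent Bitmap to a Mat and downscale it
// so the HoughCircles parameters tuned on preview frames still apply.
Bitmap captured = photo.copy(Bitmap.Config.ARGB_8888, false); // 'photo' is the Bitmap from onActivityResult (hypothetical name)
Mat rgba = new Mat();
Utils.bitmapToMat(captured, rgba);
double scale = 800.0 / rgba.cols();                           // assumed target width of 800 px
Imgproc.resize(rgba, rgba, new Size(), scale, scale, Imgproc.INTER_AREA);
Mat gray = new Mat();
Imgproc.cvtColor(rgba, gray, Imgproc.COLOR_RGBA2GRAY);
Imgproc.GaussianBlur(gray, gray, new Size(5, 5), 2, 2);
Mat circles = new Mat();
Imgproc.HoughCircles(gray, circles, Imgproc.CV_HOUGH_GRADIENT, 2.0,
        gray.rows() / 8, 100, 300, 20, 400);                  // same values as method one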

Related

Hough circle doesn't detect eyes iris

I want to detect the irises of the eyes and their centers using the Hough circle algorithm.
I'm using this code:
private void houghCircle()
{
Bitmap obtainedBitmap = imagesList.getFirst();
/* convert bitmap to mat */
// Mat constructor takes (rows, cols), i.e. (height, width)
Mat mat = new Mat(obtainedBitmap.getHeight(), obtainedBitmap.getWidth(), CvType.CV_8UC1);
Mat grayMat = new Mat(obtainedBitmap.getHeight(), obtainedBitmap.getWidth(), CvType.CV_8UC1);
Utils.bitmapToMat(obtainedBitmap, mat);
/* convert to grayscale */
int colorChannels = (mat.channels() == 3) ? Imgproc.COLOR_BGR2GRAY : ((mat.channels() == 4) ? Imgproc.COLOR_BGRA2GRAY : 1);
Imgproc.cvtColor(mat, grayMat, colorChannels);
/* reduce the noise so we avoid false circle detection */
Imgproc.GaussianBlur(grayMat, grayMat, new Size(9, 9), 2, 2);
// dp: inverse ratio of the accumulator resolution to the image resolution
double dp = 1.2d;
// minimum distance between the center coordinates of detected circles in pixels
double minDist = 100;
// min and max radii (set these values as you desire)
int minRadius = 0, maxRadius = 1000;
// param1 = upper Canny threshold used internally for edge detection
// param2 = accumulator threshold for the
// CV_HOUGH_GRADIENT method.
// The smaller the threshold is, the more circles will be
// detected (including false circles);
// the larger the threshold is, the fewer circles will
// be returned.
double param1 = 70, param2 = 72;
/* create a Mat object to store the circles detected */
Mat circles = new Mat(obtainedBitmap.getHeight(), obtainedBitmap.getWidth(), CvType.CV_8UC1); // (rows, cols); HoughCircles reallocates the output anyway
/* find the circle in the image */
Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist, param1, param2, minRadius, maxRadius);
/* get the number of circles detected */
int numberOfCircles = (circles.rows() == 0) ? 0 : circles.cols();
/* draw the circles found on the image */
for (int i=0; i<numberOfCircles; i++) {
/* get the circle details, circleCoordinates[0, 1, 2] = (x,y,r)
* (x,y) are the coordinates of the circle's center
*/
double[] circleCoordinates = circles.get(0, i);
int x = (int) circleCoordinates[0], y = (int) circleCoordinates[1];
Point center = new Point(x, y);
int radius = (int) circleCoordinates[2];
/* circle's outline */
Core.circle(mat, center, radius, new Scalar(0,
255, 0), 4);
/* circle's center outline */
Core.rectangle(mat, new Point(x - 5, y - 5),
new Point(x + 5, y + 5),
new Scalar(0, 128, 255), -1);
}
/* convert back to bitmap */
Utils.matToBitmap(mat, obtainedBitmap);
MediaStore.Images.Media.insertImage(getContentResolver(),obtainedBitmap, "testgray", "gray" );
}
But it doesn't detect the iris correctly in all images, especially if the iris has a dark color like brown. How can I fix this code so that it detects the irises and their centers correctly?
EDIT: Here are some sample images (which I got from the web) that show the performance of the algorithm (please ignore the landmarks, which are represented by the red squares):
In these images the algorithm doesn't detect all irises:
This image shows how the algorithm couldn't detect any irises at all:
EDIT 2: Here is code which uses Canny edge detection, but it causes the app to crash:
private void houghCircle()
{
Mat grayMat = new Mat();
Mat cannyEdges = new Mat();
Mat circles = new Mat();
Bitmap obtainedBitmap = imagesList.getFirst();
/* convert bitmap to mat */
Mat originalBitmap = new Mat(obtainedBitmap.getWidth(),obtainedBitmap.getHeight(),
CvType.CV_8UC1);
//Converting the image to grayscale
Imgproc.cvtColor(originalBitmap,grayMat,Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(grayMat, cannyEdges,10, 100);
Imgproc.HoughCircles(cannyEdges, circles,
Imgproc.CV_HOUGH_GRADIENT,1, cannyEdges.rows() / 15); //now circles is filled with detected circles.
//, grayMat.rows() / 8);
Mat houghCircles = new Mat();
houghCircles.create(cannyEdges.rows(),cannyEdges.cols()
,CvType.CV_8UC1);
//Drawing lines on the image
for(int i = 0 ; i < circles.cols() ; i++)
{
double[] parameters = circles.get(0,i);
double x, y;
int r;
x = parameters[0];
y = parameters[1];
r = (int)parameters[2];
Point center = new Point(x, y);
//Drawing circles on an image
Core.circle(houghCircles,center,r,
new Scalar(255,0,0),1);
}
//Converting Mat back to Bitmap
Utils.matToBitmap(houghCircles, obtainedBitmap);
MediaStore.Images.Media.insertImage(getContentResolver(),obtainedBitmap, "testgray", "gray" );
}
This is the error I get in the log
FATAL EXCEPTION: Thread-28685
CvException [org.opencv.core.CvException: cv::Exception: /hdd2/buildbot/slaves/slave_ardbeg1/50-SDK/opencv/modules/imgproc/src/color.cpp:3739: error: (-215) scn == 3 || scn == 4 in function void cv::cvtColor(cv::InputArray, cv::OutputArray, int, int)
]
at org.opencv.imgproc.Imgproc.cvtColor_1(Native Method)
at org.opencv.imgproc.Imgproc.cvtColor(Imgproc.java:4598)
Which is caused by this line: Imgproc.cvtColor(originalBitmap,grayMat,Imgproc.COLOR_BGR2GRAY);
Can anyone please tell me how this error can be solved? Perhaps adding Canny edge detection will improve the results.
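For reference, the (-215) scn == 3 || scn == 4 assertion means cvtColor received a single-channel input: originalBitmap was created as CV_8UC1 and is never filled from obtainedBitmap before the COLOR_BGR2GRAY call. A minimal sketch of the likely fix, assuming Utils.bitmapToMat as in the first snippet (Utils produces an RGBA Mat, hence RGBA2GRAY):
// Sketch only: fill the Mat from the Bitmap before converting to grayscale
Mat rgbaMat = new Mat();
Utils.bitmapToMat(obtainedBitmap, rgbaMat);   // 4-channel RGBA Mat
Imgproc.cvtColor(rgbaMat, grayMat, Imgproc.COLOR_RGBA2GRAY);
Imgproc.Canny(grayMat, cannyEdges, 10, 100);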
Hough circles work better on well-defined circles. They are not good with things like the iris.
After some thresholding, morphological operations or Canny edge detection, feature detection methods like MSER work much better for iris detection (a rough sketch follows below).
Here is a similar question with a solution if you are looking for some code.
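A rough sketch of that pipeline, assuming the OpenCV 2.4 Java bindings used elsewhere on this page (the threshold of 70, the 5x5 kernel and the input name rgbaMat are illustrative, not from the answer):
// Sketch only: threshold + morphology, then MSER regions as iris candidates
Mat gray = new Mat();
Imgproc.cvtColor(rgbaMat, gray, Imgproc.COLOR_RGBA2GRAY);          // rgbaMat: hypothetical input image
Imgproc.equalizeHist(gray, gray);
Imgproc.threshold(gray, gray, 70, 255, Imgproc.THRESH_BINARY_INV); // dark iris becomes a white blob
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_ELLIPSE, new Size(5, 5));
Imgproc.morphologyEx(gray, gray, Imgproc.MORPH_CLOSE, kernel);
FeatureDetector mser = FeatureDetector.create(FeatureDetector.MSER);
MatOfKeyPoint keypoints = new MatOfKeyPoint();
mser.detect(gray, keypoints);
for (KeyPoint kp : keypoints.toArray()) {
    // kp.pt is a candidate iris center, kp.size a rough diameter
    Core.circle(rgbaMat, kp.pt, (int) (kp.size / 2), new Scalar(0, 255, 0), 2);
}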
Since you want to detect the iris using the Hough transform (there are others), you had better study the Canny edge detector and its parameters.
cv::HoughCircles takes the Canny hysteresis threshold in param1. Investigating Canny on its own will give you an impression of a good threshold range.
Maybe instead of a Gaussian blur you could apply a better denoising (non-local means with, say, h=32 and window sizes 5 and 15), and also try to harmonize the image contrast, e.g. using contrast-limited adaptive histogram equalization (cv::CLAHE).
Harmonization is there to make sure all eyes (highlight and shadow) map to a similar intensity range.
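A minimal sketch of that preprocessing with the Java bindings, assuming org.opencv.photo.Photo and Imgproc.createCLAHE are available in your OpenCV build (the HoughCircles values at the end are placeholders, not tuned):
// Sketch only: non-local means denoising + CLAHE before the Hough transform
Mat gray = new Mat();
Imgproc.cvtColor(rgbaMat, gray, Imgproc.COLOR_RGBA2GRAY);     // rgbaMat: hypothetical input image
Photo.fastNlMeansDenoising(gray, gray, 32f, 5, 15);           // h=32, template window 5, search window 15
CLAHE clahe = Imgproc.createCLAHE(2.0, new Size(8, 8));       // contrast-limited adaptive histogram equalization
clahe.apply(gray, gray);
Mat circles = new Mat();
Imgproc.HoughCircles(gray, circles, Imgproc.CV_HOUGH_GRADIENT, 1.2,
        gray.rows() / 8, 70, 40, 15, 60);                     // thresholds and radii are illustrative only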
I wanted to know whether those images are the images you actually processed, or whether you took a cell-phone snapshot of your screen to upload them here, because the irises are bigger than the maximum radius you set in your code. Therefore I don't understand how you could find any iris at all. The irises in the first image have a radius of over 20, so you shouldn't be able to detect them.
You should set the radii to the radius range you expect your irises to have.
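As an illustration (not from the answer; the 2-6% proportions are an assumption about how large an iris is relative to a typical eye/face crop), the radii can be derived from the image size instead of being hard-coded:
// Sketch only: derive a plausible iris radius range from the image width
int minRadius = (int) (grayMat.cols() * 0.02);                // assumed ~2% of image width
int maxRadius = (int) (grayMat.cols() * 0.06);                // assumed ~6% of image width
Imgproc.HoughCircles(grayMat, circles, Imgproc.CV_HOUGH_GRADIENT,
        dp, minDist, param1, param2, minRadius, maxRadius);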

Detecting a square object from an image using OpenCv in android [duplicate]

This might have been answered, but I desperately need an answer for this. I want to find the largest square or rectangle in an image using OpenCV in Android. All of the solutions that I found are C++, and I tried converting them, but it doesn't work and I do not know where I'm wrong.
private Mat findLargestRectangle(Mat original_image) {
Mat imgSource = original_image;
Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
Imgproc.Canny(imgSource, imgSource, 100, 100);
//I don't know what to do in here
return imgSource;
}
What I am trying to accomplish here is to create a new image based on the largest square found in the original image (the returned Mat).
This is what I want to happen (example image):
http://img14.imageshack.us/img14/7855/s7zr.jpg
It's also okay if I just get the four points of the largest square; I think I can take it from there. But it would be better if I could just return the cropped image.
After Canny:
1- reduce noise with a Gaussian blur and find all the contours
2- compute and list all the contours' areas
3- the largest contour will be nothing but the painting (i.e. the object you're after)
4- now use a perspective transformation to warp that shape into a rectangle (see the sketch below)
Check Sudoku-solver examples to see a similar processing problem (largest contour + perspective).
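A minimal Java sketch of steps 1-4, assuming edges is the Canny output and original the source image (the 400x300 output size is arbitrary, and in real code the corner order of the detected quad would still have to be matched to the destination corners):
// Sketch only: largest contour -> 4-point approximation -> perspective warp
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(edges.clone(), contours, new Mat(), Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint largest = null;
double maxArea = 0;
for (MatOfPoint c : contours) {
    double area = Math.abs(Imgproc.contourArea(c));
    if (area > maxArea) { maxArea = area; largest = c; }
}
if (largest != null) {
    MatOfPoint2f curve = new MatOfPoint2f(largest.toArray());
    MatOfPoint2f quad = new MatOfPoint2f();
    Imgproc.approxPolyDP(curve, quad, 0.02 * Imgproc.arcLength(curve, true), true);
    if (quad.total() == 4) {
        MatOfPoint2f dst = new MatOfPoint2f(
                new Point(0, 0), new Point(399, 0), new Point(399, 299), new Point(0, 299));
        Mat transform = Imgproc.getPerspectiveTransform(quad, dst);
        Mat warped = new Mat();
        Imgproc.warpPerspective(original, warped, transform, new Size(400, 300));
        // 'warped' now contains the rectified content of the largest quadrilateral
    }
}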
Took me a while to convert the C++ code to Java, but here it is :-)
Warning ! Raw code, totally not optimized and all.
I decline any liability in cases of injury or lethal accident
List<MatOfPoint> squares = new ArrayList<MatOfPoint>();
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
if (Math.random()>0.80) {
findSquares(inputFrame.rgba().clone(),squares);
}
Mat image = inputFrame.rgba();
Imgproc.drawContours(image, squares, -1, new Scalar(0,0,255));
return image;
}
int thresh = 50, N = 11;
// helper function:
// finds a cosine of angle between vectors
// from pt0->pt1 and from pt0->pt2
double angle( Point pt1, Point pt2, Point pt0 ) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1*dx2 + dy1*dy2)/Math.sqrt((dx1*dx1 + dy1*dy1)*(dx2*dx2 + dy2*dy2) + 1e-10);
}
// returns sequence of squares detected on the image.
// the sequence is stored in the specified memory storage
void findSquares( Mat image, List<MatOfPoint> squares )
{
squares.clear();
Mat smallerImg=new Mat(new Size(image.width()/2, image.height()/2),image.type());
Mat gray=new Mat(image.size(),image.type());
Mat gray0=new Mat(image.size(),CvType.CV_8U);
// down-scale and upscale the image to filter out the noise
Imgproc.pyrDown(image, smallerImg, smallerImg.size());
Imgproc.pyrUp(smallerImg, image, image.size());
// find squares in every color plane of the image
for( int c = 0; c < 3; c++ )
{
extractChannel(image, gray, c);
// try several threshold levels
for( int l = 1; l < N; l++ )
{
// Canny removed... didn't work so well
Imgproc.threshold(gray, gray0, (l+1)*255/N, 255, Imgproc.THRESH_BINARY);
List<MatOfPoint> contours=new ArrayList<MatOfPoint>();
// find contours and store them all as a list
Imgproc.findContours(gray0, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
MatOfPoint approx=new MatOfPoint();
// test each contour
for( int i = 0; i < contours.size(); i++ )
{
// approximate contour with accuracy proportional
// to the contour perimeter
approx = approxPolyDP(contours.get(i), Imgproc.arcLength(new MatOfPoint2f(contours.get(i).toArray()), true)*0.02, true);
// square contours should have 4 vertices after approximation
// relatively large area (to filter out noisy contours)
// and be convex.
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if( approx.toArray().length == 4 &&
Math.abs(Imgproc.contourArea(approx)) > 1000 &&
Imgproc.isContourConvex(approx) )
{
double maxCosine = 0;
for( int j = 2; j < 5; j++ )
{
// find the maximum cosine of the angle between joint edges
double cosine = Math.abs(angle(approx.toArray()[j%4], approx.toArray()[j-2], approx.toArray()[j-1]));
maxCosine = Math.max(maxCosine, cosine);
}
// if cosines of all angles are small
// (all angles are ~90 degrees) then write quadrangle
// vertices to resultant sequence
if( maxCosine < 0.3 )
squares.add(approx);
}
}
}
}
}
void extractChannel(Mat source, Mat out, int channelNum) {
List<Mat> sourceChannels=new ArrayList<Mat>();
List<Mat> outChannel=new ArrayList<Mat>();
Core.split(source, sourceChannels);
outChannel.add(new Mat(sourceChannels.get(0).size(),sourceChannels.get(0).type()));
Core.mixChannels(sourceChannels, outChannel, new MatOfInt(channelNum,0));
Core.merge(outChannel, out);
}
MatOfPoint approxPolyDP(MatOfPoint curve, double epsilon, boolean closed) {
MatOfPoint2f tempMat=new MatOfPoint2f();
Imgproc.approxPolyDP(new MatOfPoint2f(curve.toArray()), tempMat, epsilon, closed);
return new MatOfPoint(tempMat.toArray());
}
There are some related questions here in SO. Check them out:
OpenCV C++/Obj-C: Detecting a sheet of paper / Square Detection
How do I recognize squares in this image?
There is also an example shipped with OpenCV:
https://code.ros.org/trac/opencv/browser/trunk/opencv/samples/cpp/squares.cpp?rev=4079
Once you have the rectangle, you can align the picture by computing the homography with the rectangle corners and applying a perspective transform.

How to extract lines from each contour in OpenCV for Android?

I'd like to examine each Canny-detected edge and look for the main lines in it (to check whether they seem to form a rectangle, for example whether two pairs of lines are parallel, etc.).
Imgproc.HoughLinesP does what I want, but it gives the lines from the whole image, and I want to know which lines come from the same edge.
I also tried findContours, looking for the main lines in each contour with approxPolyDP, but this doesn't seem well suited because there are often gaps in Canny-detected edges, and it gives the contours of the edges rather than the edges themselves.
Here is a test image example:
How can I get a set of lines for each shape?
Based on Miki's answer, here is what I've done:
1. Canny
2. HoughLinesP (or LineSegmentDetector, as you prefer): to detect lines
3. connectedComponents: to find the Canny "contours" in the Canny image
4. Dilate with a 3x3 kernel (see below)
5. For each Hough line: take a few pixels from the line and look for the most frequent value (ignoring 0s).
For example, I chose {p1, 0.75*p1 + 0.25*p2, 0.5*p1 + 0.5*p2, 0.25*p1 + 0.75*p2, p2}, so if my values are {1,2,0,2,2} then the line belongs to connected component number 2.
Dilating is there to make sure you don't miss a contour by only one pixel (but don't use it if your objects are too close together).
This lets you "tag" the Hough lines with the label of the contour they belong to.
All of these functions can be found in the Imgproc module; this works in OpenCV 3.0 only and gives the desired result.
Here is the code:
// open image
File root = Environment.getExternalStorageDirectory();
File file = new File(root, "image_test.png");
Mat mRGBA = Imgcodecs.imread(file.getAbsolutePath());
Imgproc.cvtColor(mRGBA, mRGBA, Imgproc.COLOR_BGR2RGB);
Mat mGray = new Mat();
Imgproc.cvtColor(mRGBA, mGray, Imgproc.COLOR_RGBA2GRAY);
Imgproc.medianBlur(mGray, mGray, 7);
/* Main part */
Imgproc.Canny(mGray, mGray, 50, 60, 3, true);
Mat aretes = new Mat();
Imgproc.HoughLinesP(mGray, aretes, 1, 0.01745329251, 30, 10, 4);
/**
* Tag Canny edges in the gray picture with indexes from 1 to 65535 (0 = background)
* (Make sure there are less than 255 components or convert mGray to 16U before)
*/
int nb = Imgproc.connectedComponents(mGray,mGray,8,CvType.CV_16U);
Imgproc.dilate(mGray, mGray, Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3,3)));
// for each Hough line
for (int x = 0; x < aretes.rows(); x++) {
double[] vec = aretes.get(x, 0);
double x1 = vec[0],
y1 = vec[1],
x2 = vec[2],
y2 = vec[3];
/**
* Take 5 points from the line
*
* x----x----x----x----x
* P1 P2
*/
double[] pixel_values = new double[5];
pixel_values[0] = mGray.get((int) y1, (int) x1)[0];
pixel_values[1] = mGray.get((int) (y1*0.75 + y2*0.25), (int) (x1*0.75 + x2*0.25))[0];
pixel_values[2] = mGray.get((int) ((y1 + y2) *0.5), (int) ((x1 + x2) *0.5))[0];
pixel_values[3] = mGray.get((int) (y1*0.25 + y2*0.75), (int) (x1*0.25 + x2*0.75))[0];
pixel_values[4] = mGray.get((int) y2, (int) x2)[0];
/**
* Look for the most frequent value
* (To make it readable, the following code accepts the line only if there are at
* least 3 good pixels)
*/
double value;
Arrays.sort(pixel_values);
if (pixel_values[1] == pixel_values[3] || pixel_values[0] == pixel_values[2] || pixel_values[2] == pixel_values[4]) {
value = pixel_values[2];
}
else {
value = 0;
}
/**
* Now value is the index of the connected component (or 0 if it's a bad line)
* You can store it in an other array, here I'll just draw the line with the value
*/
if (value != 0) {
Imgproc.line(mRGBA,new Point(x1,y1),new Point(x2,y2),new Scalar((value * 41 + 50) % 255, (value * 69 + 100) % 255, (value * 91 + 60) % 255),3);
}
}
Imgproc.cvtColor(mRGBA, mRGBA, Imgproc.COLOR_RGB2BGR);
File file2 = new File(root, "image_test_OUT.png");
Imgcodecs.imwrite(file2.getAbsolutePath(), mRGBA);
If you're using OpenCV 3.0.0 you can use LineSegmentDetector, and "AND" your detected lines with the contours.
I provide sample code below. It's C++ (sorry about that), but you can easily translate it to Java. At least you can see how to use LineSegmentDetector and how to extract the common lines for each contour. You'll see that lines on the same contour are drawn in the same color.
#include <opencv2/opencv.hpp>
using namespace cv;
using namespace std;
int main()
{
RNG rng(12345);
Mat3b img = imread("path_to_image");
Mat1b gray;
cvtColor(img, gray, COLOR_BGR2GRAY);
Mat3b result;
cvtColor(gray, result, COLOR_GRAY2BGR);
// Detect lines
Ptr<LineSegmentDetector> detector = createLineSegmentDetector();
vector<Vec4i> lines;
detector->detect(gray, lines);
// Draw lines
Mat1b lineMask(gray.size(), uchar(0));
for (int i = 0; i < lines.size(); ++i)
{
line(lineMask, Point(lines[i][0], lines[i][1]), Point(lines[i][2], lines[i][3]), Scalar(255), 2);
}
// Compute edges
Mat1b edges;
Canny(gray, edges, 200, 400);
// Find contours
vector<vector<Point>> contours;
findContours(edges.clone(), contours, CV_RETR_EXTERNAL, CV_CHAIN_APPROX_NONE);
for (int i = 0; i < contours.size(); ++i)
{
// Draw each contour
Mat1b contourMask(gray.size(), uchar(0));
drawContours(contourMask, contours, i, Scalar(255), 2); // Better use 1 here. 2 is just for visualization purposes
// AND the contour and the lines
Mat1b bor;
bitwise_and(contourMask, lineMask, bor);
// Draw the common pixels with a random color
vector<Point> common;
findNonZero(bor, common);
Vec3b color = Vec3b(rng.uniform(0, 255), rng.uniform(0, 255), rng.uniform(0, 255));
for (int j = 0; j < common.size(); ++j)
{
result(common[j]) = color;
}
}
imshow("result", result);
waitKey();
return 0;
}

Android OpenCV - detect curves from Houghlines

I am writing an app using OpenCV 2.4.3.2 for Android.
My app is about license plate recognition.
There are a few ways to do it; I chose to do the following:
1. convert the image to HSV color space
2. threshold the image according to the license plate HSV (in my country they are yellow...)
3. smooth the image with a Gaussian blur
4. detect edges
5. find contours
6. find Hough lines
7. from the Hough lines, detect curves that match a rectangle
I am stuck at 7; I can't find a way to successfully detect the rectangles from the Hough lines.
I would very much appreciate a code sample in Java, since most of the examples are in C/C++ and converting them is not so straightforward.
Here is my code (right now I am just drawing the lines...):
Imgproc.cvtColor(inputFrame, mRGBMat, Imgproc.COLOR_RGBA2BGR);
// convert HSC color space
Imgproc.cvtColor(mRGBMat, mHSVMat, Imgproc.COLOR_BGR2HSV);
// Filter out colors which are out of range (license plate hue ~ 14)
Core.inRange(mHSVMat, new Scalar(9, 70, 80, 0), new Scalar(30, 255,
255, 0), mGrayMat);
// some smoothing of the image
for (int i = 0; i < 10; i++) {
Imgproc.GaussianBlur(mGrayMat, mGrayMat, new Size(9, 9), 2, 2);
}
Mat kernel = Imgproc.getStructuringElement(Imgproc.MORPH_DILATE,
new Size(3, 3), new Point(1, 1));
Imgproc.Canny(mGrayMat, mGrayMat0, 48, 120);
Imgproc.dilate(mGrayMat0, mGrayMat0, kernel);
kernel.release();
List<MatOfPoint> contours = new Vector<MatOfPoint>();
Imgproc.findContours(mGrayMat0, contours, mHirerchy,
Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
Mat lines = new Mat(); // finds houghlines in the contours
Imgproc.HoughLinesP(mGrayMat0, lines, 1, Math.PI / 180, 1);
for (int x = 0; x < lines.cols(); x++) {
double[] vec = lines.get(0, x);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Core.line(mRgba, start, end, RECT_COLOR, 1);
}
I've written such an algorithm before. You classify the lines into two types:
1) vertical
2) horizontal
x) outliers, for deletion
Then you classify the lines further into two subtypes each:
1a) vertical, the left border
1b) vertical, the right border
1x) outliers, for deletion
2a), 2b), 2x) likewise for the horizontal lines.
Get the average slopes and intercept points of these lines and you have your "rectangle" (a sketch of the first classification step follows below).
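A rough sketch of the first classification step, assuming lines is the 1xN Mat produced by the HoughLinesP call above (the 10-degree tolerances are arbitrary):
// Sketch only: split HoughLinesP segments into vertical / horizontal groups by angle
List<double[]> vertical = new ArrayList<double[]>();
List<double[]> horizontal = new ArrayList<double[]>();
for (int i = 0; i < lines.cols(); i++) {
    double[] v = lines.get(0, i);
    double angle = Math.abs(Math.toDegrees(Math.atan2(v[3] - v[1], v[2] - v[0])));
    if (angle > 80 && angle < 100) {
        vertical.add(v);                   // roughly vertical
    } else if (angle < 10 || angle > 170) {
        horizontal.add(v);                 // roughly horizontal
    }                                      // everything else is treated as an outlier
}
// Next: split each group by position (e.g. mean x for vertical lines -> left/right border),
// then average slope and intercept per subgroup to get the four border lines.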

OpenCV crop function fatal signal 11

Hello, I am writing an Android app which uses OpenCV to detect rectangles/squares. To detect them I am using functions (modified a bit) from squares.cpp. I store the points of every square found in vector<vector<Point>> squares, then pass it to a function which chooses the biggest one and stores it in vector<Point> theBiggestSq. The problem is with the cropping function, whose code I will paste below (I will also post a link to a YouTube video showing the problem). If the actual square is far enough from the camera it works OK, but if I bring it a bit closer, at some point it will hang. I will post a screenshot of the problem from LogCat with the points printed out (the boundary points taken from the theBiggestSq vector; maybe it will help to find the solution).
void cutAndSave(vector<Point> theBiggestSq, Mat image){
RotatedRect box = minAreaRect(Mat(theBiggestSq));
// Draw bounding box in the original image (debug purposes)
//cv::Point2f vertices[4];
//box.points(vertices);
//for (int i = 0; i < 4; ++i)
//{
//cv::line(img, vertices[i], vertices[(i + 1) % 4], cv::Scalar(0, 255, 0), 1, CV_AA);
//}
//cv::imshow("box", img);
//cv::imwrite("box.png", img);
// Set Region of Interest to the area defined by the box
Rect roi;
roi.x = box.center.x - (box.size.width / 2);
roi.y = box.center.y - (box.size.height / 2);
roi.width = box.size.width;
roi.height = box.size.height;
// Crop the original image to the defined ROI
//bmp=Bitmap.createBitmap(box.size.width / 2, box.size.height / 2, Bitmap.Config.ARGB_8888);
Mat crop = image(roi);
//Mat crop = image(Rect(roi.x, roi.y, roi.width, roi.height)).clone();
//Utils.matToBitmap(crop*.clone()* ,bmp);
imwrite("/sdcard/OpenCVTest/1.png", bmp);
imshow("crop", crop);
}
Video of my app and its problems
The coords printed are, respectively: roi.x, roi.y, roi.width, roi.height
Another problem is that the drawn boundaries should be green, but as you can see in the video they are distorted (flexed, as if the boundaries were made of glass?).
Thank you for any help. I am new to OpenCV (I have been using it for only one month), so please be tolerant.
EDIT:
drawing code:
//draw//
for( size_t i = 0; i < squares.size(); i++ )
{
const Point* p = &squares[i][0];
int n = (int)squares[i].size();
polylines(mBgra, &p, &n, 1, true, Scalar(255,255,0), 5, 10);
//Rect rect = boundingRect(cv::Mat(squares[i]));
//rectangle(mBgra, rect.tl(), rect.br(), cv::Scalar(0,255,0), 2, 8, 0);
}
This error basically tells you the cause: your ROI exceeds the image dimensions. This means that when you extract Rect roi from RotatedRect box, either x or y is smaller than zero, or the width/height pushes the rectangle outside the image. You should check this using something like
// Propose rectangle from data
int proposedX = box.center.x - (box.size.width / 2);
int proposedY = box.center.y - (box.size.height / 2);
int proposedW = box.size.width;
int proposedH = box.size.height;
// Ensure top-left edge is within image
roi.x = proposedX < 0 ? 0 : proposedX;
roi.y = proposedY < 0 ? 0 : proposedY;
// Ensure bottom-right edge is within image
roi.width =
(roi.x - 1 + proposedW) > image.cols ? // Will this roi exceed image?
(image.cols - 1 - roi.x) // YES: make roi go to image edge
: proposedW; // NO: continue as proposed
// Similar for height
roi.height = (roi.y - 1 + proposedH) > image.rows ? (image.rows - 1 - roi.y) : proposedH;
