Android OpenCV FindRectangle algorithm not working properly

I am trying to use this code http://androiderstuffs.blogspot.com/2016/06/detecting-rectangle-using-opencv-java.html to detect a card. But instead of putting the card on a flat surface, I will be holding it in my hand, in front of my head. The problem is that it doesn't detect the card's rectangle. I am new to OpenCV. See my code below; it highlights every rectangle found in the output image, but it never finds the card's rectangle.
private void findRectangleOpen(Bitmap image) throws Exception {
    Mat tempor = new Mat();
    Mat src = new Mat();
    Utils.bitmapToMat(image, tempor);
    Imgproc.cvtColor(tempor, src, Imgproc.COLOR_BGR2RGB);

    Mat blurred = src.clone();
    Imgproc.medianBlur(src, blurred, 9);

    Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    List<Mat> blurredChannel = new ArrayList<Mat>();
    blurredChannel.add(blurred);
    List<Mat> gray0Channel = new ArrayList<Mat>();
    gray0Channel.add(gray0);

    MatOfPoint2f approxCurve;
    int maxId = -1; // unused

    for (int c = 0; c < 3; c++) {
        int ch[] = {c, 0};
        Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));

        int thresholdLevel = 1;
        for (int t = 0; t < thresholdLevel; t++) {
            if (t == 0) {
                Imgproc.Canny(gray0, gray, 10, 20, 3, true); // true ?
                Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
            } else {
                // dead branch while thresholdLevel == 1
                Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
                        Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
                        Imgproc.THRESH_BINARY,
                        (src.width() + src.height()) / 200, t);
            }

            Imgproc.findContours(gray, contours, new Mat(),
                    Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

            int i = 0;
            for (MatOfPoint contour : contours) {
                MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
                double area = Imgproc.contourArea(contour);
                approxCurve = new MatOfPoint2f();
                Imgproc.approxPolyDP(temp, approxCurve,
                        Imgproc.arcLength(temp, true) * 0.02, true);

                if (approxCurve.total() == 4 && area >= 200 && area <= 40000) {
                    double maxCosine = 0;
                    List<Point> curves = approxCurve.toList();
                    for (int j = 2; j < 5; j++) {
                        double cosine = Math.abs(angle(curves.get(j % 4),
                                curves.get(j - 2), curves.get(j - 1)));
                        maxCosine = Math.max(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3) {
                        Imgproc.drawContours(src, contours, i, new Scalar(255, 0, 0), 3);
                        Bitmap bmp = Bitmap.createBitmap(src.cols(), src.rows(),
                                Bitmap.Config.ARGB_8888);
                        Utils.matToBitmap(src, bmp);
                        ByteArrayOutputStream stream = new ByteArrayOutputStream();
                        bmp.compress(Bitmap.CompressFormat.PNG, 100, stream);
                        byte[] byteArray = stream.toByteArray();
                        //File origFile = getFileForSaving();
                        savePhoto(byteArray);
                        bmp.recycle();
                    }
                }
                i++;
            }
        }
    }
}

private static double angle(org.opencv.core.Point p1, org.opencv.core.Point p2, org.opencv.core.Point p0) {
    double dx1 = p1.x - p0.x;
    double dy1 = p1.y - p0.y;
    double dx2 = p2.x - p0.x;
    double dy2 = p2.y - p0.y;
    return (dx1 * dx2 + dy1 * dy2)
            / Math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
Sample output image (all found rectangles highlighted):
[Output of detecting rectangle]
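One thing worth checking in the filter above: the fixed area window (200 to 40000) silently rejects a card held close to the camera, since it can easily cover far more than 40000 px². A minimal sketch of a scale-relative acceptance test, with the convexity check from OpenCV's squares.cpp sample added (the 1%/90% bounds are illustrative guesses, not values from the original post):

// Sketch: accept quads by area relative to the frame instead of fixed bounds.
// `src`, `contour` and `approxCurve` are the variables from the loop above.
static boolean plausibleCard(Mat src, MatOfPoint contour, MatOfPoint2f approxCurve) {
    double imageArea = (double) src.rows() * src.cols();
    double area = Imgproc.contourArea(contour);
    return approxCurve.total() == 4
            && area >= 0.01 * imageArea   // at least ~1% of the frame (tune)
            && area <= 0.90 * imageArea   // but not essentially the whole frame
            && Imgproc.isContourConvex(new MatOfPoint(approxCurve.toArray()));
}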

Related

CvException in Android

This is the exception I'm getting:
CvException [org.opencv.core.CvException: cv::Exception: /hdd2/buildbot/slaves/slave_ardbeg1/50-SDK/opencv/modules/imgproc/src/hough.cpp:712: error: (-5) The source image must be 8-bit, single-channel in function CvSeq* cvHoughLines2(CvArr*, void*, int, double, double, int, double, double)
mat = new Mat();
edges = new Mat();
lines = new Mat();
mRgba = new Mat(612, 816, CvType.CV_8UC1);
Utils.bitmapToMat(bitmap, mat); // mat is CV_8UC4 (RGBA)
Imgproc.Canny(mat, edges, 50, 90);
int threshold = 50;
int minLineSize = 20;
int lineGap = 20;
try {
    // this is the line that throws: `mat` is 4-channel RGBA,
    // but HoughLines wants the 8-bit single-channel `edges`
    Imgproc.HoughLines(mat, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);
    for (int x = 0; x < lines.cols(); x++) {
        double[] vec = lines.get(0, x);
        double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
        Point start = new Point(x1, y1);
        Point end = new Point(x2, y2);
        Core.line(mRgba, start, end, new Scalar(255, 0, 0), 3);
    }
    Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(mRgba, bmp);
    bitmap = bmp;
} catch (Exception e) {
    e.printStackTrace();
    System.out.println("e = " + e);
}
The Mat you pass to HoughLines isn't in the right format: the exception says the source must be an 8-bit, single-channel image, but you are passing the RGBA Mat instead of your Canny output. Prepare the image before putting it into the function, for example like this:
https://stackoverflow.com/a/7975315/5577679
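For reference, a minimal sketch of the repaired call sequence (an assumption based on the exception text: the Canny edge map is what gets fed to the transform, and since minLineSize/lineGap are HoughLinesP parameters, the probabilistic variant is used here):

Mat gray = new Mat();
Mat edges = new Mat();
Mat lines = new Mat();
Imgproc.cvtColor(mat, gray, Imgproc.COLOR_RGBA2GRAY); // bitmapToMat yields CV_8UC4
Imgproc.Canny(gray, edges, 50, 90);
// pass the 8-bit single-channel edge map, not the RGBA source;
// threshold / minLineLength / maxLineGap belong to HoughLinesP
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 180, 50, 20, 20);
for (int i = 0; i < lines.rows(); i++) {
    double[] v = lines.get(i, 0); // [x1, y1, x2, y2]
    Imgproc.line(mRgba, new Point(v[0], v[1]), new Point(v[2], v[3]),
            new Scalar(255, 0, 0), 3);
}

(Imgproc.line is the OpenCV 3.x name; on 2.4 it is Core.line, as in the snippet above.)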

How to detect (Count) Hair from image using OpenCV?

I have tried the code below, using the OpenCV functions cvtColor, Canny and HoughLinesP, but I can't get an accurate result, and in some cases it doesn't work at all.
private boolean opencvProcessCount(Uri picFileUri) {
    hairCount = 0;
    totalC = 0;
    //Log.e(">>>>>>>>","count " + picFileUri);
    try {
        InputStream iStream = getContentResolver().openInputStream(picFileUri);
        byte[] im = getBytes(iStream);
        BitmapFactory.Options opt = new BitmapFactory.Options();
        opt.inDither = true;
        opt.inPreferredConfig = Bitmap.Config.ARGB_8888;
        Bitmap image = BitmapFactory.decodeByteArray(im, 0, im.length);
        Mat mYuv = new Mat();
        Utils.bitmapToMat(image, mYuv); // RGBA, despite the name
        Mat mRgba = new Mat();
        Imgproc.cvtColor(mYuv, mRgba, Imgproc.COLOR_RGB2GRAY, 4);
        Imgproc.Canny(mRgba, mRgba, 80, 90);
        Mat lines = new Mat();
        int threshold = 80;
        int minLineSize = 30;
        int lineGap = 100;
        Imgproc.HoughLinesP(mRgba, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);
        for (int x = 0; x < lines.rows(); x++) {
            double[] vec = lines.get(x, 0);
            double x1 = vec[0],
                    y1 = vec[1],
                    x2 = vec[2],
                    y2 = vec[3];
            Point start = new Point(x1, y1);
            Point end = new Point(x2, y2);
            double dx = x1 - x2;
            double dy = y1 - y2;
            double dist = Math.sqrt(dx * dx + dy * dy);
            totalC++;
            Log.e(">>>>>>>>", "dist " + dist);
            // draw only those lines longer than 300 px
            if (dist > 300.d) {
                hairCount++;
                // Log.e(">>>>>>>>","count " + x);
                Imgproc.line(mRgba, start, end, new Scalar(0, 255, 0, 255), 5);
            }
        }
        Log.e(">>>>>>>>", totalC + " out hairCount " + hairCount);
    } catch (Throwable e) {
        // Log.e(">>>>>>>>","count " + e.getMessage());
        e.printStackTrace();
    }
    return false;
}
Below are sample images to count hair:
I think you will find this article interesting:
http://www.cs.ubc.ca/~lowe/papers/aij87.pdf
They take a 2D bitmap, apply a Canny edge detector, and then regroup segments of the different edges based on how likely they are to belong to the same object (in this case hair), and they give criteria for such regrouping.
I think you could use this to count how many objects there are in the image; if the image contains only hair, that count is your hair count.
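The paper's grouping criteria (proximity, collinearity and so on) have to be implemented by hand, but as a crude first approximation you can count connected components of a dilated edge map, letting one blob stand in for one grouped object. A minimal sketch under that assumption (OpenCV 3.x Java; srcRgba is a hypothetical name for the RGBA input, and this is not the paper's method, just a cheap proxy):

// Crude sketch: connected components of the edge map as a stand-in for edge grouping.
Mat gray = new Mat(), edges = new Mat(), labels = new Mat();
Imgproc.cvtColor(srcRgba, gray, Imgproc.COLOR_RGBA2GRAY);
Imgproc.Canny(gray, edges, 80, 90);
// dilate so fragments of the same hair merge into one component
Imgproc.dilate(edges, edges, new Mat(), new Point(-1, -1), 2);
int nLabels = Imgproc.connectedComponents(edges, labels); // label 0 is the background
int objectCount = nLabels - 1;

This will badly over- or under-count crossing hairs, which is exactly the case the paper's grouping criteria are designed to handle.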

OpenCV circles color detection

I am trying to detect red circles on camera frames. I was trying to do the same as here https://solarianprogrammer.com/2015/05/08/detect-red-circles-image-using-opencv/ and here Android OpenCV Color detection, but it only detects blue circles O_o (screenshots: https://drive.google.com/open?id=0B-pbp_K-xNkEWmxRa2tSMUZlUEE). Here is my code:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    Mat lowerRed = new Mat();
    Mat upperRed = new Mat();
    Mat redHueImage = new Mat();
    Mat green;   // unused
    Mat blue;    // unused
    Mat orange;  // unused

    Mat rgba = inputFrame.rgba(); // RGBA frame
    Core.flip(rgba, rgba, 1);

    Mat HSVMat = new Mat();
    Imgproc.medianBlur(rgba, rgba, 3);
    Imgproc.cvtColor(rgba, HSVMat, Imgproc.COLOR_BGR2HSV, 0); // note: the input is RGBA, not BGR

    // red wraps around the hue axis, so mask both ends and merge
    Core.inRange(HSVMat, new Scalar(0, 100, 100), new Scalar(10, 255, 255), lowerRed);
    Core.inRange(HSVMat, new Scalar(160, 100, 100), new Scalar(179, 255, 255), upperRed);
    Core.addWeighted(lowerRed, 1.0, upperRed, 1.0, 0.0, redHueImage);

    Imgproc.GaussianBlur(redHueImage, redHueImage, new Size(9, 9), 2, 2);

    double dp = 1.2d;
    double minDist = 100; // declared, but rows()/8 is what is actually passed below
    int minRadius = 0;
    int maxRadius = 0;
    double param1 = 100, param2 = 20;

    Mat circles = new Mat();
    Imgproc.HoughCircles(redHueImage, circles, Imgproc.HOUGH_GRADIENT, dp,
            redHueImage.rows() / 8, param1, param2, minRadius, maxRadius);

    int numCircles = (circles.rows() == 0) ? 0 : circles.cols();
    for (int i = 0; i < numCircles; i++) {
        double[] circleCoordinates = circles.get(0, i);
        int x = (int) circleCoordinates[0], y = (int) circleCoordinates[1];
        Point center = new Point(x, y);
        int radius = (int) circleCoordinates[2];
        Imgproc.circle(rgba, center, radius, new Scalar(0, 255, 0), 4);
    }

    lowerRed.release();
    upperRed.release();
    HSVMat.release();
    return rgba;
}
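One likely cause: inputFrame.rgba() delivers an RGBA frame, but the code above converts with COLOR_BGR2HSV, which reads the red and blue channels swapped before the hue is computed, so a red object lands in the blue hue range (which matches the screenshots). A minimal sketch of the conversion under that assumption:

// Sketch: convert from the RGBA camera frame, so red stays red in HSV.
Mat hsv = new Mat();
Imgproc.cvtColor(rgba, hsv, Imgproc.COLOR_RGB2HSV);
Mat lowerRed = new Mat(), upperRed = new Mat(), redMask = new Mat();
Core.inRange(hsv, new Scalar(0, 100, 100), new Scalar(10, 255, 255), lowerRed);
Core.inRange(hsv, new Scalar(160, 100, 100), new Scalar(179, 255, 255), upperRed);
Core.addWeighted(lowerRed, 1.0, upperRed, 1.0, 0.0, redMask);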

How to detect marked black regions inside largest Rectangle Contour?

I can detect the largest contour, which is the answer sheet (20 questions, each with 4 alternatives).
After drawing the largest contour, what should I do? Divide the rectangle into a 20x4 grid of cells? Or find contours again, this time inside the rectangle? I don't know exactly what I need; I just want to find out which cells are marked.
I looked at this document.
How do I code the "image gridding and division" step? (A sketch follows the code below.)
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    return findLargestRectangle(inputFrame.rgba());
}

private Mat findLargestRectangle(Mat original_image) {
    Mat imgSource = original_image;
    hierarchy = new Mat();
    // convert the image to grayscale
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BGR2GRAY);
    // edge detection (8 bit)
    Imgproc.Canny(imgSource, imgSource, 50, 50);
    // apply gaussian blur to smoothen lines of dots
    Imgproc.GaussianBlur(imgSource, imgSource, new Size(5, 5), 5);
    // find the contours
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(imgSource, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    hierarchy.release();
    double maxArea = -1;
    int maxAreaIdx = -1;
    MatOfPoint temp_contour = contours.get(0); // the largest is at index 0 as a starting point
    MatOfPoint2f approxCurve = new MatOfPoint2f();
    Mat largest_contour = contours.get(0);
    List<MatOfPoint> largest_contours = new ArrayList<MatOfPoint>();
    for (int idx = 0; idx < contours.size(); idx++) {
        temp_contour = contours.get(idx);
        double contourarea = Imgproc.contourArea(temp_contour);
        // compare this contour to the previous largest contour found
        if (contourarea > maxArea) {
            // check if this contour is a square
            MatOfPoint2f new_mat = new MatOfPoint2f(temp_contour.toArray());
            int contourSize = (int) temp_contour.total();
            Imgproc.approxPolyDP(new_mat, approxCurve, contourSize * 0.05, true);
            if (approxCurve.total() == 4) {
                maxArea = contourarea;
                maxAreaIdx = idx;
                largest_contours.add(temp_contour);
                largest_contour = temp_contour;
            }
        }
    }
    MatOfPoint temp_largest = largest_contours.get(largest_contours.size() - 1);
    largest_contours = new ArrayList<MatOfPoint>();
    largest_contours.add(temp_largest);
    Imgproc.cvtColor(imgSource, imgSource, Imgproc.COLOR_BayerBG2RGB);
    Imgproc.drawContours(imgSource, contours, maxAreaIdx, new Scalar(0, 255, 0), 1);
    Log.d(TAG, "Largest Contour: " + contours.get(maxAreaIdx).toString());
    return imgSource;
}
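For the gridding step asked about above, a minimal sketch of the "divide into cells" idea (assuming warped is the thresholded answer block already warped to an upright rectangle; the 0.3 fill ratio is a guess to tune on real sheets):

// Grid a warped, thresholded answer block into 4 rows x 20 columns
// and call a cell "marked" when enough of it is filled.
int rows = 4, cols = 20;
int cellW = warped.cols() / cols, cellH = warped.rows() / rows;
for (int r = 0; r < rows; r++) {
    for (int c = 0; c < cols; c++) {
        Mat cell = warped.submat(new Rect(c * cellW, r * cellH, cellW, cellH));
        double fill = Core.countNonZero(cell) / (double) (cellW * cellH);
        boolean marked = fill > 0.3;
    }
}

The accepted approach below goes one step further and resizes the warped block to 20x5, so that each cell collapses to a single pixel that can be read directly.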
UPDATE 1:
I want to thank @sturkmen for his answer. I can read and find the black regions now. Here is the Android code:
public View onCreateView(LayoutInflater inflater, ViewGroup container,
                         Bundle savedInstanceState) {
    View _view = inflater.inflate(R.layout.fragment_main, container, false);
    // Inflate the layout for this fragment
    Button btnTest = (Button) _view.findViewById(R.id.btnTest);
    btnTest.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test2.jpg");
            if (img.empty()) {
                Log.d("Fragment", "IMG EMPTY");
            }
            Mat gray = new Mat();
            Mat thresh = new Mat();
            // convert the image to grayscale
            Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
            // binarize (8 bit, inverted, Otsu threshold)
            Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
            Mat temp = thresh.clone();
            // find the contours
            Mat hierarchy = new Mat();
            Mat corners = new Mat(4, 1, CvType.CV_32FC2);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(temp, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            hierarchy.release();
            for (int idx = 0; idx < contours.size(); idx++) {
                MatOfPoint contour = contours.get(idx);
                MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
                RotatedRect minRect = Imgproc.minAreaRect(contour_points);
                Point[] rect_points = new Point[4];
                minRect.points(rect_points);
                if (minRect.size.height > img.width() / 2) {
                    List<Point> srcPoints = new ArrayList<Point>(4);
                    srcPoints.add(rect_points[2]);
                    srcPoints.add(rect_points[3]);
                    srcPoints.add(rect_points[0]);
                    srcPoints.add(rect_points[1]);
                    corners = Converters.vector_Point_to_Mat(srcPoints, CvType.CV_32F);
                }
            }
            Imgproc.erode(thresh, thresh, new Mat(), new Point(-1, -1), 10);
            Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1, -1), 5);
            Mat results = new Mat(1000, 250, CvType.CV_8UC3);
            Mat quad = new Mat(1000, 250, CvType.CV_8UC1);
            List<Point> dstPoints = new ArrayList<Point>(4);
            dstPoints.add(new Point(0, 0));
            dstPoints.add(new Point(1000, 0));
            dstPoints.add(new Point(1000, 250));
            dstPoints.add(new Point(0, 250));
            Mat quad_pts = Converters.vector_Point_to_Mat(dstPoints, CvType.CV_32F);
            Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
            Imgproc.warpPerspective(img, results, transmtx, new Size(1000, 250));
            Imgproc.warpPerspective(thresh, quad, transmtx, new Size(1000, 250));
            Imgproc.resize(quad, quad, new Size(20, 5));
            Imgcodecs.imwrite("results.png", quad);
            // show image
            showImage(quad);
            // store image
            storeImage(quad);
        }
    });
    return _view;
}
public void showImage(Mat img) {
    ImageView imgView = (ImageView) getActivity().findViewById(R.id.sampleImageView);
    //Mat mRgba = new Mat();
    //mRgba = Utils.loadResource(MainAct.this, R.drawable.your_image, Highgui.CV_LOAD_IMAGE_COLOR);
    Bitmap img2 = Bitmap.createBitmap(img.cols(), img.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img, img2);
    imgView.setImageBitmap(img2);
}

public File mediaStorageDir() {
    File _mediaStorageDir = new File(Environment.getExternalStorageDirectory()
            + "/Android/data/"
            + getActivity().getApplicationContext().getPackageName());
    return _mediaStorageDir;
}

public void storeImage(Mat matImg) {
    Bitmap bitmapImg = Bitmap.createBitmap(matImg.cols(), matImg.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matImg, bitmapImg);
    String timeStamp = new SimpleDateFormat("ddMMyyyy_HHmm").format(new Date());
    File mediaFile;
    String mImageName = "IMG_" + timeStamp + ".jpg";
    mediaFile = new File(mediaStorageDir().getPath() + File.separator + mImageName);
    File pictureFile = mediaFile;
    try {
        FileOutputStream fos = new FileOutputStream(pictureFile);
        bitmapImg.compress(Bitmap.CompressFormat.PNG, 90, fos);
        fos.close();
    } catch (FileNotFoundException e) {
        Log.d("FragmentMain", "File not found: " + e.getMessage());
    } catch (IOException e) {
        Log.d("FragmentMain", "Error accessing file: " + e.getMessage());
    }
}
Here is my trial code as a sample. I hope it will be helpful. (I will add some explanation about the code later.)
Test image (I edited your image so that it has an empty answer and an invalid double mark):
[Test image] (source: opencv.org)
Result image:
[Result image] (source: opencv.org)
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>

using namespace cv;
using namespace std;

int main( int argc, const char** argv )
{
    Mat img = imread(argv[1]);
    if (img.empty())
    {
        return -1;
    }

    Size dims(20, 5); // this variable should be changed according to the input

    Mat gray, thresh;
    cvtColor(img, gray, COLOR_BGR2GRAY);
    threshold(gray, thresh, 0, 255, THRESH_BINARY_INV + THRESH_OTSU);

    Mat quad(img.size(), CV_8UC1); // should be improved
    Mat results(img.size(), CV_8UC3);

    vector<Point2f> quad_pts;
    quad_pts.push_back(cv::Point2f(0, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, 0));
    quad_pts.push_back(cv::Point2f(quad.cols, quad.rows));
    quad_pts.push_back(cv::Point2f(0, quad.rows));

    vector<Point2f> corners;
    vector<vector<Point> > contours;
    findContours(thresh.clone(), contours, RETR_EXTERNAL, CHAIN_APPROX_SIMPLE);

    for( size_t i = 0; i < contours.size(); i++ )
    {
        RotatedRect minRect = minAreaRect( Mat(contours[i]) );
        // rotated rectangle
        Point2f rect_points[4];
        minRect.points( rect_points );

        if (Rect(minRect.boundingRect()).width > img.cols / 2) // should be improved
            for( int j = 0; j < 4; j++ )
            {
                Point2f pt = quad_pts[j];
                Point2f nearest_pt = rect_points[0];
                float dist = norm( pt - nearest_pt );
                for( int k = 1; k < 4; k++ )
                {
                    if( norm( pt - rect_points[k] ) < dist )
                    {
                        dist = norm( pt - rect_points[k] );
                        nearest_pt = rect_points[k];
                    }
                }
                corners.push_back( nearest_pt );
            }
    }

    erode(thresh, thresh, Mat(), Point(-1, -1), 10);  // should be improved
    dilate(thresh, thresh, Mat(), Point(-1, -1), 5);  // should be improved

    Mat transmtx = getPerspectiveTransform(corners, quad_pts);
    warpPerspective(img, results, transmtx, img.size()); // create a Mat to show results
    warpPerspective(thresh, quad, transmtx, img.size());
    resize(quad, quad, dims);

    for (int i = 0; i < quad.cols; i++)
    {
        String answer = "";
        answer += quad.at<uchar>(1, i) == 0 ? "" : "A";
        answer += quad.at<uchar>(2, i) == 0 ? "" : "B";
        answer += quad.at<uchar>(3, i) == 0 ? "" : "C";
        answer += quad.at<uchar>(4, i) == 0 ? "" : "D";
        if (answer.length() > 1) answer = "X"; // double mark
        int y = 0;
        if (answer == "A") y = results.rows / dims.height;
        if (answer == "B") y = results.rows / dims.height * 2;
        if (answer == "C") y = results.rows / dims.height * 3;
        if (answer == "D") y = results.rows / dims.height * 4;
        if (answer == "") answer = "[-]";
        putText(results, answer, Point(50 * i + 15, 30 + y), FONT_HERSHEY_PLAIN, 2, Scalar(0, 0, 255), 2);
    }

    imshow("results", results);
    waitKey(0);
    return 0;
}
As a challenge to myself, I tried to implement the main part in Java (newcomer copy-paste code):
Mat img = Imgcodecs.imread("test.jpg");
Mat gray = new Mat();
Mat thresh = new Mat();
//convert the image to black and white
Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
//convert the image to black and white does (8 bit)
Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
Mat temp = thresh.clone();
//find the contours
Mat hierarchy = new Mat();
Mat corners = new Mat(4,1,CvType.CV_32FC2);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(temp, contours,hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
hierarchy.release();
for (int idx = 0; idx < contours.size(); idx++)
{
MatOfPoint contour = contours.get(idx);
MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
RotatedRect minRect = Imgproc.minAreaRect( contour_points );
Point[] rect_points = new Point[4];
minRect.points( rect_points );
if(minRect.size.height > img.width() / 2)
{
List<Point> srcPoints = new ArrayList<Point>(4);
srcPoints.add(rect_points[2]);
srcPoints.add(rect_points[3]);
srcPoints.add(rect_points[0]);
srcPoints.add(rect_points[1]);
corners = Converters.vector_Point_to_Mat(
srcPoints, CvType.CV_32F);
}
}
Imgproc.erode(thresh, thresh, new Mat(), new Point(-1,-1), 10);
Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1,-1), 5);
Mat results = new Mat(1000,250,CvType.CV_8UC3);
Mat quad = new Mat(1000,250,CvType.CV_8UC1);
List<Point> dstPoints = new ArrayList<Point>(4);
dstPoints.add(new Point(0, 0));
dstPoints.add(new Point(1000, 0));
dstPoints.add(new Point(1000, 250));
dstPoints.add(new Point(0, 250));
Mat quad_pts = Converters.vector_Point_to_Mat(
dstPoints, CvType.CV_32F);
Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
Imgproc.warpPerspective( img, results, transmtx, new Size(1000,250));
Imgproc.warpPerspective( thresh, quad, transmtx, new Size(1000,250));
Imgproc.resize(quad,quad,new Size(20,5));
Imgcodecs.imwrite("results.png",quad);
Here is the (20x5 px) result image:
I improved @sturkmen's code.
fragment_main.xml
<FrameLayout xmlns:android="http://schemas.android.com/apk/res/android"
xmlns:tools="http://schemas.android.com/tools"
android:layout_width="match_parent"
android:layout_height="match_parent"
tools:context="{your package name}.FragmentMain">
<!-- TODO: Update blank fragment layout -->
<LinearLayout
android:orientation="vertical"
android:layout_width="match_parent"
android:layout_height="match_parent">
<Button
android:id="#+id/btnTest"
android:layout_width="match_parent"
android:layout_height="80dp"
android:text="Test" />
<ImageView
android:id="#+id/sampleImageView"
android:layout_width="match_parent"
android:layout_height="150dp"
android:layout_centerHorizontal="true"/>
</LinearLayout>
</framelayout>
AndroidManifest.xml
Add this line for write permission.
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
FragmentMain.java
IMAGE FILE: place the test image at Internal Storage/Android/data/{your package folder}/test.JPG
public View onCreateView(LayoutInflater inflater, ViewGroup container,
                         Bundle savedInstanceState) {
    View _view = inflater.inflate(R.layout.fragment_main, container, false);
    Button btnTest = (Button) _view.findViewById(R.id.btnTest);
    btnTest.setOnClickListener(new View.OnClickListener() {
        @Override
        public void onClick(View v) {
            Mat img = Imgcodecs.imread(mediaStorageDir().getPath() + "/" + "test.JPG");
            if (img.empty()) {
                Log.d("FragmentMain", "Empty Image");
            }
            Size dims = new Size(20, 5);
            Mat gray = new Mat();
            Mat thresh = new Mat();
            // convert the image to grayscale
            Imgproc.cvtColor(img, gray, Imgproc.COLOR_BGR2GRAY);
            storeImage(gray);
            // binarize (8 bit, inverted, Otsu threshold)
            Imgproc.threshold(gray, thresh, 0, 255, Imgproc.THRESH_BINARY_INV + Imgproc.THRESH_OTSU);
            storeImage(thresh);
            Mat temp = thresh.clone();
            // find the contours
            Mat hierarchy = new Mat();
            Mat corners = new Mat(4, 1, CvType.CV_32FC2);
            List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
            Imgproc.findContours(temp, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            hierarchy.release();
            for (int idx = 0; idx < contours.size(); idx++) {
                MatOfPoint contour = contours.get(idx);
                MatOfPoint2f contour_points = new MatOfPoint2f(contour.toArray());
                RotatedRect minRect = Imgproc.minAreaRect(contour_points);
                Point[] rect_points = new Point[4];
                minRect.points(rect_points);
                if (minRect.size.height > img.width() / 2) {
                    List<Point> srcPoints = new ArrayList<Point>(4);
                    srcPoints.add(rect_points[2]);
                    srcPoints.add(rect_points[3]);
                    srcPoints.add(rect_points[0]);
                    srcPoints.add(rect_points[1]);
                    corners = Converters.vector_Point_to_Mat(srcPoints, CvType.CV_32F);
                }
            }
            Imgproc.erode(thresh, thresh, new Mat(), new Point(-1, -1), 10);
            storeImage(thresh);
            Imgproc.dilate(thresh, thresh, new Mat(), new Point(-1, -1), 5);
            storeImage(thresh);
            Mat results = new Mat(1000, 250, CvType.CV_8UC3);
            Mat quad = new Mat(1000, 250, CvType.CV_8UC1);
            List<Point> dstPoints = new ArrayList<Point>(4);
            dstPoints.add(new Point(0, 0));
            dstPoints.add(new Point(1000, 0));
            dstPoints.add(new Point(1000, 250));
            dstPoints.add(new Point(0, 250));
            Mat quad_pts = Converters.vector_Point_to_Mat(dstPoints, CvType.CV_32F);
            Mat transmtx = Imgproc.getPerspectiveTransform(corners, quad_pts);
            Imgproc.warpPerspective(img, results, transmtx, new Size(1000, 250));
            Imgproc.warpPerspective(thresh, quad, transmtx, new Size(1000, 250));
            Imgproc.resize(quad, quad, new Size(20, 5));
            Imgcodecs.imwrite("results.png", quad);
            // store image
            storeImage(quad);
            // show image
            showImage(quad);
            System.out.println(quad.dump());
            for (int i = 0; i < quad.cols(); i++) {
                int size = (int) (quad.total() * quad.channels()); // unused
                byte[] tmp = new byte[size]; // unused
                String answer = "";
                double[] d;
                d = quad.get(1, i);
                answer += d[0] == 0 ? "" : "A";
                d = quad.get(2, i);
                answer += d[0] == 0 ? "" : "B";
                d = quad.get(3, i);
                answer += d[0] == 0 ? "" : "C";
                d = quad.get(4, i);
                answer += d[0] == 0 ? "" : "D";
                if (answer.length() > 1) answer = "X"; // double mark
                int y = 0;
                if (answer.equals("A")) y = results.rows() / (int) dims.height;
                if (answer.equals("B")) y = results.rows() / (int) dims.height * 2;
                if (answer.equals("C")) y = results.rows() / (int) dims.height * 3;
                if (answer.equals("D")) y = results.rows() / (int) dims.height * 4;
                if (answer.isEmpty()) answer = "[-]"; // was `answer == ""`, which compares references
                Imgproc.putText(results, answer, new Point(50 * i + 15, 30 + y),
                        Core.FONT_HERSHEY_PLAIN, 2, new Scalar(0, 0, 255), 2);
            }
            // store image
            storeImage(results);
            // show image
            showImage(results);
        }
    });
    return _view;
}
public void showImage(Mat img) {
    ImageView imgView = (ImageView) getActivity().findViewById(R.id.sampleImageView);
    //Mat mRgba = new Mat();
    //mRgba = Utils.loadResource(MainAct.this, R.drawable.your_image, Highgui.CV_LOAD_IMAGE_COLOR);
    Bitmap img2 = Bitmap.createBitmap(img.cols(), img.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(img, img2);
    imgView.setImageBitmap(img2);
}

public File mediaStorageDir() {
    File _mediaStorageDir = new File(Environment.getExternalStorageDirectory()
            + "/Android/data/"
            + getActivity().getApplicationContext().getPackageName());
    return _mediaStorageDir;
}

public void storeImage(Mat matImg) {
    Bitmap bitmapImg = Bitmap.createBitmap(matImg.cols(), matImg.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(matImg, bitmapImg);
    String timeStamp = new SimpleDateFormat("ddMMyyyy_HHmm").format(new Date());
    File mediaFile;
    String mImageName = "IMG_" + timeStamp + ".jpg";
    mediaFile = new File(mediaStorageDir().getPath() + File.separator + mImageName);
    File pictureFile = mediaFile;
    try {
        FileOutputStream fos = new FileOutputStream(pictureFile);
        bitmapImg.compress(Bitmap.CompressFormat.PNG, 90, fos);
        fos.close();
    } catch (FileNotFoundException e) {
        Log.d("FragmentMain", "File not found: " + e.getMessage());
    } catch (IOException e) {
        Log.d("FragmentMain", "Error accessing file: " + e.getMessage());
    }
}

Android OpenCV: getting a blank (black) image after warpPerspective

I am working with Android + OpenCV + JNI to find the largest contour in the image and then crop that largest contour using a perspective transformation. My problem is that after applying the transformation I am not able to convert the resulting Mat to a Bitmap; it fails with the error
OpenCV Error: Assertion failed (src.type() == CV_8UC1 || src.type() == CV_8UC3 || src.type() == CV_8UC4) in void Java_org_opencv_android_Utils_nMatToBitmap2(JNIEnv*, jclass, jlong, jobject, jboolean), file /home/reports/ci/slave_desktop/50-SDK/opencv/modules/java/generator/src/cpp/utils.cpp, line 98
Here is my JNI code:
JNIEXPORT jint JNICALL
Java_org_opencv_samples_tutorial3_Sample3Native_FindSquares(
        JNIEnv* env, jobject, jlong addrRgba, jint draw, jlong addrDescriptor) {
    Mat& image = *(Mat*) addrRgba;
    Mat& pMatDesc = *(Mat*) addrDescriptor;
    int thresh = 50, N = 4;
    int found = 0;
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;
    // down-scale and upscale the image to filter out the noise
    pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
    pyrUp(pyr, timg, image.size());
    vector< vector<Point> > contours;
    // find squares in every color plane of the image
    for (int c = 1; c < 3; c++) {
        int ch[] = { c, 0 };
        mixChannels(&timg, 1, &gray0, 1, ch, 1);
        // try several threshold levels
        for (int l = 0; l < N; l++) {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if (l == 0) {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1, -1));
            } else {
                // apply threshold if l != 0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l + 1) * 255 / N;
            }
            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
            vector<Point> approx;
            // test each contour
            for (size_t i = 0; i < contours.size(); i++) {
                //__android_log_print(ANDROID_LOG_INFO, "Test", "Error:", v);
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx,
                        arcLength(Mat(contours[i]), true) * 0.02, true);
                // square contours should have 4 vertices after approximation,
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000
                        && isContourConvex(Mat(approx))) {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++) {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(
                                angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    // if cosines of all angles are small
                    // (all angles are ~90 degrees) then write quadrangle
                    // vertices to resultant sequence
                    if (maxCosine < 0.3) {
                        circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        //rectangle(image, approx[0], approx[2], Scalar(0,255,0,255), 5, 4, 0);
                        // center of this rectangle
                        int x = (int) ((approx[0].x + approx[1].x + approx[2].x
                                + approx[3].x) / 4.0);
                        int y = (int) ((approx[0].y + approx[1].y + approx[2].y
                                + approx[3].y) / 4.0);
                        if ((int) draw) {
                            // outline
                            line(image, approx[0], approx[1], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[1], approx[2], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[2], approx[3], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[3], approx[0], Scalar(0, 255, 0, 255), 1, 4, 0);
                            // center
                            //circle(image, Point(x,y), 1, Scalar(255,0,0,255));
                        }
                        vector<Point2f> src(4);
                        src[0] = approx[0];
                        src[1] = approx[1];
                        src[2] = approx[2];
                        src[3] = approx[3];
                        cv::Mat quad = cv::Mat::zeros(300, 220, CV_32FC1);
                        // transformed quadrangle
                        vector<Point2f> quad_pts(4);
                        quad_pts[0] = Point(0, 0);
                        quad_pts[1] = Point(quad.cols, 0);
                        quad_pts[2] = Point(quad.cols, quad.rows);
                        quad_pts[3] = Point(0, quad.rows);
                        Mat transmtx = getPerspectiveTransform(src, quad_pts);
                        // note: this warps `src` (the 4 corner points), not the image,
                        // which is why the result ends up CV_32FC1 instead of a picture
                        warpPerspective(src, quad, transmtx, quad.size());
                        quad.copyTo(pMatDesc);
                        found = 1;
                        jint result = (jint) found;
                        return result;
                    }
                }
            }
        }
    }
    jint result = (jint) found;
    return result;
}
In my Java code I am calling this function as:
found = FindSquares(mRgba.getNativeObjAddr(), mDraw,
        descriptor.getNativeObjAddr());
And finally I am trying to convert the final Mat to a Bitmap:
Mat final_mat = new Mat(descriptor.height(), descriptor.width(), CvType.CV_8UC4);
descriptor.copyTo(final_mat);
bitmap = Bitmap.createBitmap(final_mat.cols(), final_mat.rows(),
        Bitmap.Config.ARGB_8888);
Utils.matToBitmap(final_mat, bitmap);
The final_mat type ends up being CV_32FC1.
How can I convert it to CV_8UC4? Please help me find a solution.
Edit:
I have changed the final_mat image to CV_8UC3:
descriptor.copyTo(final_mat);
descriptor.convertTo(final_mat, CvType.CV_8UC1);
Imgproc.cvtColor(final_mat, final_mat, Imgproc.COLOR_GRAY2RGB);
But I am getting a blank (black) image as a result.
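For the literal conversion question, a minimal sketch (note the Edit above calls convertTo with no scale factor; if the float values are normalized to [0, 1], saturate_cast rounds them all to 0 or 1, which displays as black; the 255.0 below assumes [0, 1] values, use 1.0 if they are already 0-255, and none of this fixes the root cause addressed in the answer below):

// Sketch: CV_32FC1 -> CV_8UC4 so Utils.matToBitmap accepts it.
Mat gray8 = new Mat();
descriptor.convertTo(gray8, CvType.CV_8UC1, 255.0);
Mat rgba8 = new Mat();
Imgproc.cvtColor(gray8, rgba8, Imgproc.COLOR_GRAY2RGBA);
Bitmap bmp = Bitmap.createBitmap(rgba8.cols(), rgba8.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(rgba8, bmp);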
After long research I've found the solution. The problem arose because conversions were applied to the actual image. We should use a duplicate of the actual Mat object for the conversions (blur, Canny, etc.), and use the actual Mat object for the warp perspective transformation. Here I am attaching reference code to find the largest contour.
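Before the full listing, the fix as a minimal Java sketch (names like srcCorners/dstCorners/cropped are illustrative, not from the code below):

// Sketch: run detection on a clone, warp the untouched original.
Mat work = image.clone();
Imgproc.medianBlur(work, work, 9);
Imgproc.Canny(work, work, 10, 20);
// ...locate the quad corners on `work`, fill srcCorners/dstCorners (CV_32FC2)...
Mat transmtx = Imgproc.getPerspectiveTransform(srcCorners, dstCorners);
Imgproc.warpPerspective(image, cropped, transmtx, cropped.size()); // image is still 8-bit color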
jni_part.cpp:
extern "C" {
double angle(Point pt1, Point pt2, Point pt0);
JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
jint height_);
JNIEXPORT jint Java_info_androidhive_androidcameraapi_CameraMainActivity_findSquare(
JNIEnv*, jobject, jlong addrRgba, jlong addrDescriptor, jint width_,
jint height_) {
Mat& image = *(Mat*) addrRgba;
Mat& imageCropped = *(Mat*) addrDescriptor;
int screen_width = (int) width_;
int screen_height = (int) height_;
Mat newSrc = image.clone();
imageCropped = image.clone();
Mat testImage = image.clone();
// blur will enhance edge detection
Mat blurred(testImage);
medianBlur(testImage, blurred, 9);
Mat gray0(blurred.size(), CV_8U), gray;
vector<vector<Point> > contours;
// find squares in every color plane of the image
cv::vector<cv::vector<cv::Point> > squares;
for (int c = 0; c < 3; c++) {
int ch[] = { c, 0 };
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++) {
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0) {
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1, -1));
} else {
gray = gray0 >= (l + 1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector<Point> approx;
if (contours.size() > 0) {
for (size_t i = 0; i < contours.size(); i++) {
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx,
arcLength(Mat(contours[i]), true) * 0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4
&& fabs(contourArea(Mat(approx))) > 1000
&& isContourConvex(Mat(approx))) {
double maxCosine = 0;
for (int j = 2; j < 5; j++) {
double cosine = fabs(
angle(approx[j % 4], approx[j - 2],
approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3) {
squares.push_back(approx);
/*circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3,
4, 0);
circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3,
4, 0);
circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3,
4, 0);
circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3,
4, 0);
if ((int) draw) {
line(image, approx[0], approx[1],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[1], approx[2],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[2], approx[3],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[3], approx[0],
Scalar(0, 255, 0, 255), 2, 4, 0);
}*/
}
}
}
}
}
}
if (squares.size() > 0) {
int max_width = 0;
int max_height = 0;
int max_square_idx = 0;
cv::vector<cv::Point> biggest_square;
squares.size());
for (size_t i = 0; i < squares.size(); i++) {
cv::Rect structure.
cv::Rect rectangle = boundingRect(cv::Mat(squares[i]));
// Store the index position of the biggest square found
if ((rectangle.width >= max_width)
&& (rectangle.height >= max_height)) {
max_width = rectangle.width;
max_height = rectangle.height;
max_square_idx = i;
}
}
biggest_square = squares[max_square_idx];
vector<Point> _adjustRect;
_adjustRect = squares[max_square_idx];
if (biggest_square.size() == 4) {
vector<Point> sortedPoints;
sortedPoints = squares[max_square_idx];
Point ptbiggest_square = biggest_square[0];
Point ptBottomLeft1 = biggest_square[0];
Point ptBottomRight1 = biggest_square[1];
Point ptTopRight1 = biggest_square[2];
Point ptTopLeft1 = biggest_square[3];
int bl = ptBottomLeft1.x + ptBottomLeft1.y;
int br = ptBottomRight1.x + ptBottomRight1.y;
int tr = ptTopRight1.x + ptTopRight1.y;
int tl = ptTopLeft1.x + ptTopLeft1.y;
int value_array[] = { bl, br, tr, tl };
int max = value_array[0];
int min = value_array[0];
for (int s = 0; s < 4; s++) {
if (value_array[s] > max) {
max = value_array[s];
} else if (value_array[s] < min) {
min = value_array[s];
}
}
int minIndex = 0;
int maxIndex = 0;
int missingIndexOne = 0;
int missingIndexTwo = 0;
for (int i = 0; i < 4; i++) {
if (value_array[i] == min) {
sortedPoints[0] = biggest_square[i];
minIndex = i;
continue;
}
if (value_array[i] == max) {
sortedPoints[2] = biggest_square[i];
maxIndex = i;
continue;
}
missingIndexOne = i;
}
for (int i = 0; i < 4; i++) {
if (missingIndexOne != i && minIndex != i && maxIndex != i) {
missingIndexTwo = i;
}
}
if (biggest_square[missingIndexOne].x
< biggest_square[missingIndexTwo].x) {
//2nd Point Found
sortedPoints[3] = biggest_square[missingIndexOne];
sortedPoints[1] = biggest_square[missingIndexTwo];
} else {
//4rd Point Found
sortedPoints[1] = biggest_square[missingIndexOne];
sortedPoints[3] = biggest_square[missingIndexTwo];
}
_adjustRect[0] = sortedPoints[0];
_adjustRect[1] = sortedPoints[1];
_adjustRect[2] = sortedPoints[2];
_adjustRect[3] = sortedPoints[3];
}
Point ptTopLeft = _adjustRect[0];
Point ptTopRight = _adjustRect[1];
Point ptBottomRight = _adjustRect[2];
Point ptBottomLeft = _adjustRect[3];
float imageScale = fminf((float) screen_width / newSrc.cols,
(float) screen_height / newSrc.rows);
__android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "imageScale %f",
imageScale);
__android_log_print(ANDROID_LOG_INFO, "OpenGLTest", "width_ %d",
screen_width);
float w1 = sqrt(
pow(ptBottomRight.x / imageScale - ptBottomLeft.x / imageScale,
2)
+ pow(
ptBottomRight.x / imageScale
- ptBottomLeft.x / imageScale, 2));
float w2 = sqrt(
pow(ptTopRight.x / imageScale - ptTopLeft.x / imageScale, 2)
+ pow(
ptTopRight.x / imageScale
- ptTopLeft.x / imageScale, 2));
float h1 = sqrt(
pow(ptTopRight.y / imageScale - ptBottomRight.y / imageScale, 2)
+ pow(
ptTopRight.y / imageScale
- ptBottomRight.y / imageScale, 2));
float h2 = sqrt(
pow(ptTopLeft.y / imageScale - ptBottomLeft.y / imageScale, 2)
+ pow(
ptTopLeft.y / imageScale
- ptBottomLeft.y / imageScale, 2));
float maxWidth = (w1 < w2) ? w1 : w2;
float maxHeight = (h1 < h2) ? h1 : h2;
Point2f src[4], quad[4];
src[0].x = ptTopLeft.x;
src[0].y = ptTopLeft.y;
src[1].x = ptTopRight.x;
src[1].y = ptTopRight.y;
src[2].x = ptBottomRight.x;
src[2].y = ptBottomRight.y;
src[3].x = ptBottomLeft.x;
src[3].y = ptBottomLeft.y;
quad[0].x = 0;
quad[0].y = 0;
quad[1].x = maxWidth - 1;
quad[1].y = 0;
quad[2].x = maxWidth - 1;
quad[2].y = maxHeight - 1;
quad[3].x = 0;
quad[3].y = maxHeight - 1;
cv::Mat undistorted = cv::Mat(cvSize(maxWidth, maxHeight), CV_8UC1);
cv::warpPerspective(newSrc, undistorted,
cv::getPerspectiveTransform(src, quad),
cvSize(maxWidth, maxHeight));
imageCropped = undistorted.clone();
}
return 1;
}
double angle(Point pt1, Point pt2, Point pt0) {
double dx1 = pt1.x - pt0.x;
double dy1 = pt1.y - pt0.y;
double dx2 = pt2.x - pt0.x;
double dy2 = pt2.y - pt0.y;
return (dx1 * dx2 + dy1 * dy2)
/ sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2) + 1e-10);
}
}
Happy Coding!!
