I use the back camera to capture frames. By default the Android camera frame is landscape, so to get the input frame I use
Core.flip(currentFrame, currentFrame, 1); // flip around Y-axis
After some image enhancement and findContours using OpenCV, I have the following problems:
a. When the object moves left, the drawn circle moves downward.
b. When the object moves right, the drawn circle moves upward.
c. When the object moves upward, the drawn circle moves left.
d. When the object moves downward, the drawn circle moves right.
In other words, the drawn circle (output) would have to be rotated 90 degrees clockwise to match the source image.
The code is as follows:
package com.mtyiuaa.writingintheair;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceView;
import android.view.View;
import android.content.Intent;
import android.view.ViewDebug;
import android.widget.Button;
import java.util.ArrayList;
import java.util.List;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.JavaCameraView;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.Point;
import org.opencv.core.MatOfPoint;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.Rect;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgproc.Moments;
import org.opencv.highgui.VideoCapture;
public class MainActivity extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2{
private static final int THRESH_BINARY = 1;
private static final int THRESH_TOZERO = 4;
private static String TAG = "MainActivity";
JavaCameraView javaCameraView;
JavaCameraView javaCameraView2;
VideoCapture videoCapture;
Mat mRgba;
Mat temp;
Mat previousFrame;
Mat GpreviousFrame; // gray-level frame of previous Frame
Mat currentFrame;
Mat GcurrentFrame; // gray-level frame of current Frame
Mat diffFrame;
Mat imgGray;
Mat imgHSV;
Mat imgCanny;
Mat inputFrame;
Mat FlipFrame;
Mat outputFrame;
Mat imgthresholding;
Mat imgNormalization;
Mat imgGaussianSmothing;
int max_Binary_value = 255;
int thresh = 20;
Boolean CameraActive;
Boolean firstIteration= true;
int[] theObject = {0,0};
int x=0, y=0;
int FRAME_WIDTH = 1280;
int FRAME_HEIGHT = 720;
//max number of objects to be detected in frame
int MAX_NUM_OBJECTS=50;
//Minimum and Maximum object area
int MIN_OBJECT_AREA = 20*20;
int MAX_OBJECT_AREA = (int) ((FRAME_HEIGHT*FRAME_WIDTH)/1.5);
//MatOfPoint allcontours = new MatOfPoint();
//bounding rectangle of the object, we will use the center of this as its position.
BaseLoaderCallback mLoaderCallBack = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch(status){
case BaseLoaderCallback.SUCCESS:{
javaCameraView.enableView();
//javaCameraView2.enableView();
break;
}
default:{
super.onManagerConnected(status);
break;
}
}
}
};
static{
}
//JavaCameraView javaCameraView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
javaCameraView.setVisibility(SurfaceView.VISIBLE);
javaCameraView.setCvCameraViewListener(this);
}
@Override
protected void onPause(){
super.onPause();
if(javaCameraView!=null) {
CameraActive = false;
javaCameraView.disableView();
}
}
@Override
protected void onDestroy(){
super.onDestroy(); // call the basic function
if(javaCameraView!=null){
javaCameraView.disableView();
}
}
@Override
protected void onResume(){
super.onResume(); //call based class
if(OpenCVLoader.initDebug()){
Log.i(TAG, "OpenCV loaded successfully");
mLoaderCallBack.onManagerConnected(LoaderCallbackInterface.SUCCESS);
//grab a new instance by using Basecallbackloader
}
else {
Log.i(TAG, "OpenCV not loaded");
//recall opencvLoader if not loaded
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_10, this, mLoaderCallBack);
}
}
@Override
public void onCameraViewStarted(int width, int height) {
//Mat::Mat(int rows, int cols, int type)
// initialize all Mat object when onCamera starts
CameraActive = true;
// 4 channels are used
mRgba = new Mat(height, width, CvType.CV_8SC4);
FlipFrame = new Mat(height, width, CvType.CV_8SC4);
previousFrame =new Mat(height, width, CvType.CV_8SC4);
currentFrame = new Mat(height, width, CvType.CV_8SC4);
diffFrame =new Mat(height, width, CvType.CV_8SC4);
// 1 channel is used.
GcurrentFrame = new Mat(height, width, CvType.CV_8SC1);
GpreviousFrame = new Mat(height, width, CvType.CV_8SC1);
imgGray= new Mat(height, width, CvType.CV_8SC1);
imgHSV = new Mat (height, width, CvType.CV_8SC1);
imgCanny = new Mat(height, width, CvType.CV_8SC1);
imgGaussianSmothing = new Mat(height, width, CvType.CV_8SC1);
imgthresholding = new Mat(height, width, CvType.CV_8SC1);
imgNormalization = new Mat(height,width, CvType.CV_8SC1);
inputFrame = new Mat(height, width, CvType.CV_8SC1);
outputFrame = new Mat(height, width, CvType.CV_8SC1);
temp = new Mat(height, width, CvType.CV_8SC1);
}
@Override
public void onCameraViewStopped() {
mRgba.release();
FlipFrame.release();
previousFrame.release();
currentFrame.release();
diffFrame.release();
GcurrentFrame.release();
GpreviousFrame.release();
imgGray.release();
imgHSV.release();
imgCanny.release();
imgGaussianSmothing.release();
imgthresholding.release();
imgNormalization.release();
inputFrame.release();
outputFrame.release();
temp.release();
CameraActive = false;
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
while(CameraActive) {
Mat temp2 = new Mat();
Mat temp3 = new Mat();
currentFrame = inputFrame.rgba();
Core.flip(currentFrame, currentFrame, 1); // flip around Y-axis
RGB2HSV(currentFrame).copyTo(temp2);
FilterHSVImage(temp2).copyTo(temp2);
//CannyDetector(temp2).copyTo(temp4);
MorphOperation(temp2).copyTo(temp2);
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(temp2,contours,hierarchy,Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
temp2.copyTo(temp3);
FindLargestContours(temp3, contours);
//return outputFrame;
}
return null;
}
// Edge Detector using Canny
// Goal: edge image is less sensitive to lighting conditions
public Mat CannyDetector(Mat inputFrame) {
Imgproc.Canny(inputFrame, imgCanny, 50, 150);
return imgCanny;
}
private Mat RGB2Gray (Mat inputFrame){
Imgproc.cvtColor(inputFrame, imgGray, Imgproc.COLOR_RGB2GRAY);
return imgGray;
}
private Mat RGB2HSV (Mat inputFrame){
Imgproc.cvtColor(inputFrame, imgHSV, Imgproc.COLOR_RGB2HSV);
return imgHSV;
}
private Mat FilterHSVImage(Mat inputFrame){
Core.inRange(inputFrame, new Scalar(0, 100, 100), new Scalar(10, 255, 255), imgthresholding);
//Core.inRange(temp2, new Scalar(160, 100, 100), new Scalar(179, 255, 255), temp2);
return imgthresholding;
}
private Mat MorphOperation (Mat inputFrame){
//Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2*dilation_size + 1, 2*dilation_size+1));
//Imgproc.dilate(source, destination, element1);
//Highgui.imwrite("dilation.jpg", destination);
Mat erodeElement =Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3,3));
Mat dilateElement = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size (8,8));
Imgproc.dilate(inputFrame, inputFrame, erodeElement);
Imgproc.dilate(inputFrame, inputFrame, erodeElement);
Imgproc.erode(inputFrame, inputFrame, dilateElement);
Imgproc.erode(inputFrame, inputFrame, dilateElement);
return inputFrame;
}
private Mat Threshold(Mat inputFrame){
Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, Imgproc.THRESH_TOZERO);
return imgthresholding;
}
private Mat ThresholdToBinary(Mat inputFrame){
Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, Imgproc.THRESH_BINARY);
//Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, THRESH_BINARY);
return imgthresholding;
}
private Mat Normalization(Mat inputFrame, double min, double max){
//double E_Max =
Core.normalize(inputFrame, imgNormalization, min, max, Core.NORM_MINMAX);
return imgNormalization;
}
private Mat drawObject(int x, int y, Mat inputFrame) {
Point point = new Point(x, y);
Point pointA = new Point(x, y - 25);
Point pointB = new Point(x, y + 25);
Point pointC = new Point(x - 25, y);
Point pointD = new Point(x + 25, y);
Scalar scalar = new Scalar(255, 0, 0);
Core.circle(inputFrame,point,20,scalar,2);
if(y-25>0) Core.line(inputFrame,point,pointA,scalar,2);
else Core.line(inputFrame,point,new Point(x,0),scalar,2);
if(y+25<FRAME_HEIGHT) Core.line(inputFrame,point,pointB,scalar,2);
else Core.line(inputFrame,point,new Point(x,FRAME_HEIGHT),scalar,2);
if(x-25>0)Core.line(inputFrame,point,pointC,scalar,2);
else Core.line(inputFrame,point,new Point(0,y),scalar,2);
if(x+25<FRAME_WIDTH) Core.line(inputFrame,point,pointD,scalar,2);
else Core.line(inputFrame,point,new Point(FRAME_WIDTH,y),scalar,2);
Core.putText(inputFrame, "Tracking object at (" + Integer.toString(x)+" , "+ Integer.toString(y)+ ")",point, 1, 1,scalar, 2);
// putText(inputFrame,intToString(x)+","+intToString(y),Point(x,y+30),1,1,Scalar(0,255,0),2);
Log.i(TAG, "Draw x at "+Integer.toString(x)+ " Draw y at "+ Integer.toString(y));
inputFrame.copyTo(outputFrame);
return outputFrame;
}
private void TrackFilteredObject (int x, int y, Mat filteredImage, Mat sourceImage){
boolean objectFound = false;
Mat temp3 = new Mat();
filteredImage.copyTo(temp3);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(temp3,contours,hierarchy,Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);
//Point[] contourPoints = (Point[]) contours.toArray();
double refArea = 0;
if (hierarchy.size().height>0 && hierarchy.size().width>0){
// int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
//if(numObjects<MAX_NUM_OBJECTS) {
for (int index = 0; index >= 0; index =(int)hierarchy.get(index,0)[0]){
//hierarchy[index][0]) {
Moments moment = Imgproc.moments(contours.get(index), true);
double area = moment.get_m00();
//if the area is less than 20 px by 20px then it is probably just noise
//if the area is the same as the 3/2 of the image size, probably just a bad filter
//we only want the object with the largest area so we save a reference area each
//iteration and compare it to the area in the next iteration.
if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA && area > refArea) {
// x = moment.m10 / area;
x= (int) (moment.get_m10()/area);
y = (int) (moment.get_m01()/area);
objectFound = true;
refArea = area;
} else objectFound = false;
}
//}
}
}
}
Replace x with y, it's pretty simple: the output is rotated 90 degrees relative to the source, so swap the x and y coordinates when you draw.
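A minimal sketch of that swap, using the centroid values and drawObject() from the question (whether an extra mirror against FRAME_WIDTH/FRAME_HEIGHT is also needed is an assumption to verify on the device):
// Sketch: the tracked centroid (x, y) lives in a coordinate frame rotated
// 90 degrees relative to the drawn output, so exchange the coordinates
// before calling drawObject().
int drawX = y;
int drawY = x;
// If the result is still mirrored along one axis, subtract from the frame
// size, e.g. drawY = FRAME_HEIGHT - x (untested assumption).
drawObject(drawX, drawY, currentFrame);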
I want to save a Mat mRgba as a picture with Imgcodecs.imwrite(Environment.getExternalStorageDirectory() + "/final-image.jpg", mRgba);, but it saves more than I want. I want to save the image without the rectangle Imgproc.rectangle(mRgba, new Point(touchedYD, touchedXL), new Point(touchedYU, touchedXR), Util.WHITE, 2); that is drawn on screen before saving. How can I achieve that?
Here is my code.
Fragment:
public class StageTwo extends Fragment implements CameraBridgeViewBase.CvCameraViewListener2, OnSwitchFragmentFromStageTwo {
@Override
public void onViewCreated(@NonNull View view, @Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
((Stages) getActivity()).onSwitchFragmentFromStageTwo = this;
view.setOnTouchListener(new View.OnTouchListener() {
@Override
public boolean onTouch(View v, MotionEvent event) {
// horizontally
if (-event.getX() + camLayHeight + (xCorrection * 10) < (camLayHeight / 2)) {
touchedXR = -event.getX() + camLayHeight + (xCorrection * 10);
if (touchedXR < 0) touchedXR = 0;
} else {
touchedXL = -event.getX() + camLayHeight + (xCorrection * 10);
if (touchedXL > camLayHeight) touchedXL = camLayHeight;
}
// vertically
if (event.getY() - (yCorrection * 10) < (camLayWidth / 2)) {
touchedYU = event.getY() - (yCorrection * 10);
if (touchedYU < 0) touchedYU = 0;
} else {
touchedYD = event.getY() - (yCorrection * 10);
if (touchedYD > camLayWidth) touchedYD = camLayWidth;
}
return true;
}
});
kamera = view.findViewById(R.id.java_surface_view);
kamera.setCvCameraViewListener(this);
Display display = getActivity().getWindowManager().getDefaultDisplay();
android.graphics.Point size = new android.graphics.Point();
display.getSize(size);
int height = size.y;
kamera.getLayoutParams().height = height / 2;
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Core.transpose(mGray, mGray);
Core.flip(mGray, mGray, -1);
Imgproc.cvtColor(inputFrame.rgba(), mRgba, Imgproc.COLOR_RGBA2RGB, 1);
if (gridPref.equals(getString(R.string.yes))) {
Imgproc.line(mRgba, p1, p2, Util.BLUE);
Imgproc.line(mRgba, p3, p4, Util.BLUE);
}
Imgproc.rectangle(mRgba, new Point(touchedYD, touchedXL), new Point(touchedYU, touchedXR), Util.WHITE, 2);
rozmiar_y = (int) ((touchedYU - touchedYD));
rozmiar_x = (int) ((touchedXL - touchedXR));
if (rozmiar_x > rozmiar_y)
px_cm = (double) Math.round((rozmiar_x / Integer.parseInt(rozmiar)) * 100000) / 100000d;
if (rozmiar_x < rozmiar_y)
px_cm = (double) Math.round((rozmiar_y / Integer.parseInt(rozmiar)) * 100000) / 100000d;
return mRgba;
}
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC3);
new Mat(height, width, CvType.CV_8UC4);
mGray = new Mat(height, width, CvType.CV_8UC1);
camLayHeight = height; // smaller value, 480
camLayWidth = width;
touchedXL = camLayHeight / 2;
touchedXR = camLayHeight / 2;
touchedYD = camLayWidth / 2;
touchedYU = camLayWidth / 2;
}
@Override
public void onCameraViewStopped() {
}
@Override
public double onSwitchFragmentFromFragmentTwo() {
if (px_cm > 0.5) {
(...)
Imgcodecs.imwrite(Environment.getExternalStorageDirectory() + "/final-image.jpg", mRgba);
}
return px_cm;
}
}
Activity
OnSwitchFragmentFromStageTwo onSwitchFragmentFromStageTwo;
@Override
protected void onCreate(@Nullable Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.stages);
bottomNavigationView = findViewById(R.id.bottom_navigation);
bottomNavigationView.setItemIconTintList(null);
SharedPreferences sp = PreferenceManager.getDefaultSharedPreferences(Stages.this);
FragmentTransaction ft = getSupportFragmentManager().beginTransaction();
if (!sp.getBoolean("correctionDone", false))
ft.replace(R.id.content_frame, new StageZero(this));
else {
ft.replace(R.id.content_frame, new StageOne());
bottomNavigationView.setSelectedItemId(R.id.navigation_stage_one);
}
ft.commit();
SharedPreferences.Editor editor = sp.edit();
bottomNavigationView.setEnabled(false);
bottomNavigationView.setOnNavigationItemSelectedListener(new BottomNavigationView.OnNavigationItemSelectedListener() {
@Override
public boolean onNavigationItemSelected(@NonNull MenuItem item) {
(...)
if (bottomNavigationView.getSelectedItemId() == R.id.navigation_stage_two) {
if (onSwitchFragmentFromStageTwo.onSwitchFragmentFromFragmentTwo() <= 0.5) {
(...)
return false;
} else {
(...)
return true;
}
} else {
(...)
}
return true;
}
});
}
I tried to solve this by setting touchedXL = 0; touchedXR = 0; touchedYD = 0; touchedYU = 0; right before saving, but it did not help; the picture is still saved with the rectangle. If you need anything more, just ask. Thank you in advance! :)
You may create a copy of mRgba before drawing the rectangle.
Add a new private class member mRgbNoRect:
private Mat mRgbNoRect; //mRgba before drawing rectangle
Initialize mRgbNoRect in onCameraViewStarted:
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC3);
mRgbNoRect = new Mat(height, width, CvType.CV_8UC3);
}
Create a copy of mRgba before drawing the rectangle in onCameraFrame:
Imgproc.cvtColor(inputFrameRgba, mRgba, Imgproc.COLOR_RGBA2RGB);
mRgba.copyTo(mRgbNoRect); //Copy mRgba content to mRgbNoRect before drawing a rectangle
Imgproc.rectangle(mRgba, new Point(20, 20), new Point(100, 100), new Scalar(255, 255, 255), 2);
Note: It's just an example (not your original code).
Add a "get" function getRgbNoRect():
public Mat getRgbNoRect() {
return mRgbNoRect;
}
Get mRgbNoRect and save it (example):
Mat rgbNoRect = sample.getRgbNoRect();
Imgcodecs.imwrite("rgbNoRect.png", rgbNoRect);
Here is a complete code sample (simple sample without a camera):
package myproject;
import org.opencv.core.Core;
import org.opencv.core.Mat;
import org.opencv.core.CvType;
import org.opencv.core.Scalar;
import org.opencv.core.Point;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgcodecs.Imgcodecs;
class Sample {
private Mat mRgba;
private Mat mRgbNoRect; //mRgba before drawing rectangle
static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }
public Mat onCameraFrame() {
//Create an RGBA matrix filled with green color - simulating inputFrame.rgba().
Mat inputFrameRgba = Mat.zeros(200, 250, CvType.CV_8UC4);
inputFrameRgba.setTo(new Scalar(0, 255, 0, 255));
Imgproc.cvtColor(inputFrameRgba, mRgba, Imgproc.COLOR_RGBA2RGB);
mRgba.copyTo(mRgbNoRect); //Copy mRgba content to mRgbNoRect before drawing a rectangle
Imgproc.rectangle(mRgba, new Point(20, 20), new Point(100, 100), new Scalar(255, 255, 255), 2);
return mRgba;
}
public Mat getRgbNoRect() {
return mRgbNoRect;
}
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC3);
mRgbNoRect = new Mat(height, width, CvType.CV_8UC3);
}
public static void main(String[] args) {
Sample sample = new Sample();
sample.onCameraViewStarted(250, 200);
Mat rgbWithRect = sample.onCameraFrame();
Mat rgbNoRect = sample.getRgbNoRect();
Imgcodecs.imwrite("rgbWithRect.png", rgbWithRect);
Imgcodecs.imwrite("rgbNoRect.png", rgbNoRect);
}
}
Notes:
The code sample was tested on Windows, and I am not sure if it can be executed on Android as is.
The last time I programmed in Java was many years ago, so I hope I didn't make any rookie mistakes.
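As a side note on why the copy is made with copyTo() rather than plain assignment: assigning one Mat to another in Java only copies the reference, so the rectangle would show up in both. A small desktop sketch in the same style as the sample above (assumes the OpenCV Java bindings are on the classpath; not the asker's code) illustrates the difference:
package myproject;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;
class CopyVsReference {
    static { System.loadLibrary(Core.NATIVE_LIBRARY_NAME); }
    public static void main(String[] args) {
        Mat frame = Mat.zeros(100, 100, CvType.CV_8UC3);
        Mat deepCopy = new Mat();
        frame.copyTo(deepCopy);   // independent pixel buffer
        Mat reference = frame;    // same pixel buffer as 'frame'
        Imgproc.rectangle(frame, new Point(10, 10), new Point(50, 50), new Scalar(255, 255, 255), 2);
        // The deep copy stays all black; the reference sees the rectangle.
        System.out.println("deepCopy has rectangle:  " + (Core.countNonZero(toGray(deepCopy)) != 0));
        System.out.println("reference has rectangle: " + (Core.countNonZero(toGray(reference)) != 0));
    }
    // Core.countNonZero needs a single-channel Mat, so reduce to gray first.
    private static Mat toGray(Mat src) {
        Mat gray = new Mat();
        Imgproc.cvtColor(src, gray, Imgproc.COLOR_BGR2GRAY);
        return gray;
    }
}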
I'm using the following class to detect/receive a found document/paper:
public class DetekPs {
/**
* Object that encapsulates the contour and 4 points that makes the larger
* rectangle on the image
*/
public static class Quadrilateral {
public MatOfPoint contour;
public Point[] points;
public Quadrilateral(MatOfPoint contour, Point[] points) {
this.contour = contour;
this.points = points;
}
}
public static Quadrilateral findDocument( Mat inputRgba ) {
ArrayList<MatOfPoint> contours = findContours(inputRgba);
Quadrilateral quad = getQuadrilateral(contours);
return quad;
}
private static ArrayList<MatOfPoint> findContours(Mat src) {
double ratio = src.size().height / 500;
int height = Double.valueOf(src.size().height / ratio).intValue();
int width = Double.valueOf(src.size().width / ratio).intValue();
Size size = new Size(width,height);
Mat resizedImage = new Mat(size, CvType.CV_8UC4);
Mat grayImage = new Mat(size, CvType.CV_8UC4);
Mat cannedImage = new Mat(size, CvType.CV_8UC1);
Imgproc.resize(src,resizedImage,size);
Imgproc.cvtColor(resizedImage, grayImage, Imgproc.COLOR_RGBA2GRAY, 4);
Imgproc.GaussianBlur(grayImage, grayImage, new Size(5, 5), 0);
Imgproc.Canny(grayImage, cannedImage, 75, 200);
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(cannedImage, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
hierarchy.release();
Collections.sort(contours, new Comparator<MatOfPoint>() {
@Override
public int compare(MatOfPoint lhs, MatOfPoint rhs) {
return Double.valueOf(Imgproc.contourArea(rhs)).compareTo(Imgproc.contourArea(lhs));
}
});
resizedImage.release();
grayImage.release();
cannedImage.release();
return contours;
}
private static Quadrilateral getQuadrilateral(ArrayList<MatOfPoint> contours) {
for ( MatOfPoint c: contours ) {
MatOfPoint2f c2f = new MatOfPoint2f(c.toArray());
double peri = Imgproc.arcLength(c2f, true);
MatOfPoint2f approx = new MatOfPoint2f();
Imgproc.approxPolyDP(c2f, approx, 0.02 * peri, true);
Point[] points = approx.toArray();
// select biggest 4 angles polygon
if (points.length == 4) {
Point[] foundPoints = sortPoints(points);
return new Quadrilateral(c, foundPoints);
}
}
return null;
}
private static Point[] sortPoints(Point[] src) {
ArrayList<Point> srcPoints = new ArrayList<>(Arrays.asList(src));
Point[] result = { null , null , null , null };
Comparator<Point> sumComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y + lhs.x).compareTo(rhs.y + rhs.x);
}
};
Comparator<Point> diffComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y - lhs.x).compareTo(rhs.y - rhs.x);
}
};
// top-left corner = minimal sum
result[0] = Collections.min(srcPoints, sumComparator);
// bottom-right corner = maximal sum
result[2] = Collections.max(srcPoints, sumComparator);
// top-right corner = minimal difference
result[1] = Collections.min(srcPoints, diffComparator);
// bottom-left corner = maximal difference
result[3] = Collections.max(srcPoints, diffComparator);
return result;
}
}
Calling the detector method and drawing:
takenPicture.setImageBitmap(bmp); //default imageview which displays the entire image
if (OpenCVLoader.initDebug()) {
Mat m = new Mat();
Utils.bitmapToMat(bmp,m);
DetekPs.Quadrilateral h = DetekPs.findDocument(m);
new Handler().postDelayed(new Runnable() {
@Override
public void run() {
takenPicture2.setPointsAndDraw(h.points); //draws only the points of the detected object
}
}, 2500);
}
Drawing the points:
@Override
protected void onDraw(Canvas canvas) {
Log.e("i","he ra hu" );
Paint paint2 = new Paint();
paint2.setColor(Color.RED);
paint2.setStrokeWidth(3);
for(int idx=0;idx<po.length;idx++){
canvas.drawCircle((float)po[idx].x,(float)po[idx].y, 6, paint2);
Log.e("x",""+xx);
Log.e("y",""+yy);
}
}
The points I receive look like this:
x: 23.0
y: 122.0
x: 249.0
y: 110.0
x: 249.0
y: 110.0
x: 0.0
y: 182.0
Which causes the points to get drawn like this:
As you can see, the points I receive and thus draw are far away from where they should be on the image.
The question is rather: what can the cause be? Am I drawing it wrong, am I receiving the wrong points, or is it an issue with how large/wide my image/bitmap is, so that I need to set the right aspect ratio/resolution?
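One factor worth checking, noted here as an assumption rather than a confirmed diagnosis: findContours() in the class above resizes the image so its height is 500 px, so the points in the returned Quadrilateral are in that smaller coordinate system. Scaling them back by the same ratio before drawing would look roughly like this sketch (using m and h from the snippet above; any additional ImageView scaling would still have to be accounted for):
// Sketch: map the detected corners from the 500-px-high working image back
// to the original bitmap's coordinate system before drawing them.
double ratio = m.size().height / 500;   // same ratio used inside findContours()
for (Point p : h.points) {
    p.x *= ratio;
    p.y *= ratio;
}
takenPicture2.setPointsAndDraw(h.points);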
I am trying to implement paper detection with OpenCV. I understand the concept of how to get it:
Input -> Canny -> Blur -> Find Contour -> Search for a (closed) Quadrilateral -> Draw Contour
but I am still new to OpenCV programming, so I am having issues implementing it. I was able to find help through this answer:
Android OpenCV Paper Sheet detection
but it draws a contour on every possible line. Here is the code I am trying to implement.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
Imgproc.drawContours(mRgba,findContours(mRgba), 0, new Scalar(0 , 255, 0), 5);
return mRgba;
}
public static class Quadrilateral {
public MatOfPoint contour;
public Point[] points;
public Quadrilateral(MatOfPoint contour, Point[] points) {
this.contour = contour;
this.points = points;
}
}
public static Quadrilateral findDocument( Mat inputRgba ) {
ArrayList<MatOfPoint> contours = findContours(inputRgba);
Quadrilateral quad = getQuadrilateral(contours);
return quad;
}
private static ArrayList<MatOfPoint> findContours(Mat src) {
double ratio = src.size().height / 500;
int height = Double.valueOf(src.size().height / ratio).intValue();
int width = Double.valueOf(src.size().width / ratio).intValue();
Size size = new Size(width,height);
Mat resizedImage = new Mat(size, CvType.CV_8UC4);
Mat grayImage = new Mat(size, CvType.CV_8UC4);
Mat cannedImage = new Mat(size, CvType.CV_8UC1);
Imgproc.resize(src,resizedImage,size);
Imgproc.cvtColor(resizedImage, grayImage, Imgproc.COLOR_RGBA2GRAY, 4);
Imgproc.GaussianBlur(grayImage, grayImage, new Size(5, 5), 0);
Imgproc.Canny(grayImage, cannedImage, 75, 200);
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(cannedImage, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
hierarchy.release();
Collections.sort(contours, new Comparator<MatOfPoint>() {
@Override
public int compare(MatOfPoint lhs, MatOfPoint rhs) {
return Double.valueOf(Imgproc.contourArea(rhs)).compareTo(Imgproc.contourArea(lhs));
}
});
resizedImage.release();
grayImage.release();
cannedImage.release();
return contours;
}
private static Quadrilateral getQuadrilateral(ArrayList<MatOfPoint> contours) {
for ( MatOfPoint c: contours ) {
MatOfPoint2f c2f = new MatOfPoint2f(c.toArray());
double peri = Imgproc.arcLength(c2f, true);
MatOfPoint2f approx = new MatOfPoint2f();
Imgproc.approxPolyDP(c2f, approx, 0.02 * peri, true);
Point[] points = approx.toArray();
// select biggest 4 angles polygon
if (points.length == 4) {
Point[] foundPoints = sortPoints(points);
return new Quadrilateral(c, foundPoints);
}
}
return null;
}
private static Point[] sortPoints(Point[] src) {
ArrayList<Point> srcPoints = new ArrayList<>(Arrays.asList(src));
Point[] result = { null , null , null , null };
Comparator<Point> sumComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y + lhs.x).compareTo(rhs.y + rhs.x);
}
};
Comparator<Point> diffComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y - lhs.x).compareTo(rhs.y - rhs.x);
}
};
// top-left corner = minimal sum
result[0] = Collections.min(srcPoints, sumComparator);
// bottom-right corner = maximal sum
result[2] = Collections.max(srcPoints, sumComparator);
// top-right corner = minimal difference
result[1] = Collections.min(srcPoints, diffComparator);
// bottom-left corner = maximal difference
result[3] = Collections.max(srcPoints, diffComparator);
return result;
}
The answer suggests that I should use the Quadrilateral object and call it with Imgproc.drawContours(), but this function takes an ArrayList as an argument, whereas the Quadrilateral object contains a MatOfPoint and a Point[]. Can someone help me through this? I am using OpenCV (3.3) and Android (1.5.1).
Here is a sample of what it should look like:
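A hedged sketch of that call, assuming the findDocument()/Quadrilateral code shown above: wrap the single MatOfPoint from the Quadrilateral in a list and draw only when a quadrilateral was actually found (note that the contour is in the resized, 500-px-high coordinate system, so it may still need to be scaled back to the full frame):
// Sketch: draw only the largest detected quadrilateral instead of every contour.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    Quadrilateral quad = findDocument(mRgba);
    if (quad != null) {
        List<MatOfPoint> docContour = new ArrayList<>();
        docContour.add(quad.contour);
        Imgproc.drawContours(mRgba, docContour, -1, new Scalar(0, 255, 0), 5);
    }
    return mRgba;
}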
I'm using the Color Blob Detection sample code of OpenCV (Sample Code), but the app crashes at Core.multiply(contour, new Scalar(4,4), contour); with a fatal error
libc: Fatal signal 11 (SIGSEGV), code 1, fault addr 0x0 in tid 11016 (Thread-4857)
and the detailed log is available to give an idea of the exact reason. In another part of the code I was using Core.add(fg, bg, markers); and the app gave the same fatal error there as well. What is the issue? Please guide me towards the solution. I'm using OpenCV (3.1).
The code is as follows:
package com.iu.kamraapp.utils;
import android.util.Log;
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Scalar;
import org.opencv.imgproc.Imgproc;
public class ColorBlobDetector {
// Lower and Upper bounds for range checking in HSV color space
private Scalar mLowerBound = new Scalar(0);
private Scalar mUpperBound = new Scalar(0);
// Minimum contour area in percent for contours filtering
private static double mMinContourArea = 0.1;
// Color radius for range checking in HSV color space
private Scalar mColorRadius = new Scalar(25,50,50,0);
private Mat mSpectrum = new Mat();
private List<MatOfPoint> mContours = new ArrayList<MatOfPoint>();
// Cache
Mat mPyrDownMat = new Mat();
Mat mHsvMat = new Mat();
Mat mMask = new Mat();
Mat mDilatedMask = new Mat();
Mat mHierarchy = new Mat();
public void setColorRadius(Scalar radius) {
mColorRadius = radius;
}
public void setHsvColor(Scalar hsvColor) {
double minH = (hsvColor.val[0] >= mColorRadius.val[0]) ? hsvColor.val[0]-mColorRadius.val[0] : 0;
double maxH = (hsvColor.val[0]+mColorRadius.val[0] <= 255) ? hsvColor.val[0]+mColorRadius.val[0] : 255;
mLowerBound.val[0] = minH;
mUpperBound.val[0] = maxH;
mLowerBound.val[1] = hsvColor.val[1] - mColorRadius.val[1];
mUpperBound.val[1] = hsvColor.val[1] + mColorRadius.val[1];
mLowerBound.val[2] = hsvColor.val[2] - mColorRadius.val[2];
mUpperBound.val[2] = hsvColor.val[2] + mColorRadius.val[2];
mLowerBound.val[3] = 0;
mUpperBound.val[3] = 255;
Mat spectrumHsv = new Mat(1, (int)(maxH-minH), CvType.CV_8UC3);
for (int j = 0; j < maxH-minH; j++) {
byte[] tmp = {(byte)(minH+j), (byte)255, (byte)255};
spectrumHsv.put(0, j, tmp);
}
Imgproc.cvtColor(spectrumHsv, mSpectrum, Imgproc.COLOR_HSV2RGB_FULL, 4);
}
public Mat getSpectrum() {
return mSpectrum;
}
public void setMinContourArea(double area) {
mMinContourArea = area;
}
public void process(Mat rgbaImage) {
try {
Imgproc.pyrDown(rgbaImage, mPyrDownMat);
Imgproc.pyrDown(mPyrDownMat, mPyrDownMat);
Imgproc.cvtColor(mPyrDownMat, mHsvMat, Imgproc.COLOR_RGB2HSV_FULL);
Core.inRange(mHsvMat, mLowerBound, mUpperBound, mMask);
Imgproc.dilate(mMask, mDilatedMask, new Mat());
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Imgproc.findContours(mDilatedMask, contours, mHierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
// Find max contour area
double maxArea = 0;
Iterator<MatOfPoint> each = contours.iterator();
while (each.hasNext()) {
MatOfPoint wrapper = each.next();
double area = Imgproc.contourArea(wrapper);
if (area > maxArea)
maxArea = area;
}
// Filter contours by area and resize to fit the original image size
mContours.clear();
each = contours.iterator();
while (each.hasNext()) {
MatOfPoint contour = each.next();
if (Imgproc.contourArea(contour) > mMinContourArea * maxArea) {
Core.multiply(contour, new Scalar(4, 4), contour); // issue is here
mContours.add(contour);
}
}
}
catch (Exception e) { e.printStackTrace(); }
}
public List<MatOfPoint> getContours() {
return mContours;
}
}
package com.iu.kamraapp;
import android.content.Context;
import android.content.res.Configuration;
import android.os.Bundle;
import android.support.v7.app.AppCompatActivity;
import android.util.Log;
import android.view.MotionEvent;
import android.view.SurfaceView;
import android.view.View;
import com.iu.kamraapp.utils.AppGlobal;
import com.iu.kamraapp.utils.ColorBlobDetector;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.JavaCameraView;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint;
import org.opencv.core.Point;
import org.opencv.core.Rect;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import java.util.ArrayList;
import java.util.List;
public class ColorBlobActivity extends AppCompatActivity implements View.OnTouchListener,CameraBridgeViewBase.CvCameraViewListener2 {
private static final String TAG = "MainActivity";
Context context;
int screenWidth, screenHeight;
private CameraBridgeViewBase mOpenCvCameraView;
private boolean mIsColorSelected = false;
private Mat mRgba;
private Scalar mBlobColorRgba;
private Scalar mBlobColorHsv;
private ColorBlobDetector mDetector;
private Mat mSpectrum;
private Size SPECTRUM_SIZE;
private Scalar CONTOUR_COLOR;
BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
mOpenCvCameraView.setOnTouchListener(ColorBlobActivity.this);
}
break;
default: {
super.onManagerConnected(status);
}
break;
}
}
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
context = this;
setContentView(R.layout.activity_main);
screenWidth = AppGlobal.getScreenResolution(context, true);
screenHeight = AppGlobal.getScreenResolution(context, false);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial1_activity_java_surface_view);
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
mOpenCvCameraView.setCvCameraViewListener(this);
}
@Override
public void onConfigurationChanged(Configuration newConfig) {
super.onConfigurationChanged(newConfig);
if (newConfig.orientation == Configuration.ORIENTATION_LANDSCAPE) {
screenWidth = AppGlobal.getScreenResolution(context, false);
screenHeight = AppGlobal.getScreenResolution(context, true);
} else if (newConfig.orientation == Configuration.ORIENTATION_PORTRAIT) {
screenWidth = AppGlobal.getScreenResolution(context, true);
screenHeight = AppGlobal.getScreenResolution(context, false);
}
}
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public void onResume()
{
super.onResume();
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_1_0, this, mLoaderCallback);
OpenCVLoader.initDebug(true);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
mDetector = new ColorBlobDetector();
mSpectrum = new Mat();
mBlobColorRgba = new Scalar(255);
mBlobColorHsv = new Scalar(255);
SPECTRUM_SIZE = new Size(200, 64);
CONTOUR_COLOR = new Scalar(255,0,0,255);
}
public void onCameraViewStopped() {
mRgba.release();
}
public boolean onTouch(View v, MotionEvent event) {
try {
int cols = mRgba.cols();
int rows = mRgba.rows();
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2;
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2;
int x = (int) event.getX() - xOffset;
int y = (int) event.getY() - yOffset;
Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
Rect touchedRect = new Rect();
touchedRect.x = (x > 4) ? x - 4 : 0;
touchedRect.y = (y > 4) ? y - 4 : 0;
touchedRect.width = (x + 4 < cols) ? x + 4 - touchedRect.x : cols - touchedRect.x;
touchedRect.height = (y + 4 < rows) ? y + 4 - touchedRect.y : rows - touchedRect.y;
Mat touchedRegionRgba = mRgba.submat(touchedRect);
Mat touchedRegionHsv = new Mat();
Imgproc.cvtColor(touchedRegionRgba, touchedRegionHsv, Imgproc.COLOR_RGB2HSV_FULL);
// Calculate average color of touched region
mBlobColorHsv = Core.sumElems(touchedRegionHsv);
int pointCount = touchedRect.width * touchedRect.height;
for (int i = 0; i < mBlobColorHsv.val.length; i++)
mBlobColorHsv.val[i] /= pointCount;
mBlobColorRgba = converScalarHsv2Rgba(mBlobColorHsv);
Log.i(TAG, "Touched rgba color: (" + mBlobColorRgba.val[0] + ", " + mBlobColorRgba.val[1] +
", " + mBlobColorRgba.val[2] + ", " + mBlobColorRgba.val[3] + ")");
mDetector.setHsvColor(mBlobColorHsv);
Imgproc.resize(mDetector.getSpectrum(), mSpectrum, SPECTRUM_SIZE);
mIsColorSelected = true;
touchedRegionRgba.release();
touchedRegionHsv.release();
} catch (Exception e) { e.printStackTrace(); }
return false; // don't need subsequent touch events
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
if (mIsColorSelected) {
mDetector.process(mRgba);
List<MatOfPoint> contours = mDetector.getContours();
Log.d(TAG, "Contours count: " + contours.size());
Imgproc.drawContours(mRgba, contours, -1, CONTOUR_COLOR);
Mat colorLabel = mRgba.submat(4, 68, 4, 68);
colorLabel.setTo(mBlobColorRgba);
Mat spectrumLabel = mRgba.submat(4, 4 + mSpectrum.rows(), 70, 70 + mSpectrum.cols());
mSpectrum.copyTo(spectrumLabel);
}
return mRgba;
}
private Scalar converScalarHsv2Rgba(Scalar hsvColor) {
Mat pointMatRgba = new Mat();
Mat pointMatHsv = new Mat(1, 1, CvType.CV_8UC3, hsvColor);
Imgproc.cvtColor(pointMatHsv, pointMatRgba, Imgproc.COLOR_HSV2RGB_FULL, 4);
return new Scalar(pointMatRgba.get(0, 0));
}
}
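For what it's worth, the Core.multiply(contour, new Scalar(4, 4), contour) call in process() only scales each contour point by 4, undoing the two pyrDown calls. A hedged workaround sketch that does the same scaling point by point (an assumed substitute inside the filtering loop, not a confirmed fix for the crash; requires importing org.opencv.core.Point in ColorBlobDetector):
// Sketch: scale the contour back up by 4 without Core.multiply.
MatOfPoint contour = each.next();
if (Imgproc.contourArea(contour) > mMinContourArea * maxArea) {
    Point[] points = contour.toArray();
    for (Point p : points) {
        p.x *= 4;
        p.y *= 4;
    }
    contour.fromArray(points);
    mContours.add(contour);
}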
So I'm trying to detect a square object on Android using OpenCV through the NDK and crop it to a 2D figure. I detect the points of the squares, but when I try to use getPerspectiveTransform(src, dst) I get this error message:
OpenCV Error: Assertion failed (src.checkVector(2, CV_32F) == 4 && dst.checkVector(2, CV_32F) == 4) in cv::Mat cv::getPerspectiveTransform(cv::InputArray, cv::InputArray), file /home/reports/ci/slave/50-SDK/opencv/modules/imgproc/src/imgwarp.cpp, line 3607
This is my Activity in Android:
package org.opencv.samples.tutorial1;
import java.io.ByteArrayOutputStream;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewFrame;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.android.Utils;
import org.opencv.core.CvException;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener2;
import android.app.Activity;
import android.content.Intent;
import android.graphics.Bitmap;
import android.os.Bundle;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.SurfaceView;
import android.view.View;
import android.view.WindowManager;
import android.view.View.OnClickListener;
import android.widget.Toast;
public class Tutorial1Activity extends Activity implements
CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private Mat mRgba;
private Mat mGrayMat;
private Mat imageTaken;
private CameraBridgeViewBase mOpenCvCameraView;
private boolean mIsJavaCamera = true;
private MenuItem mItemSwitchCamera = null;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i(TAG, "OpenCV loaded successfully");
System.loadLibrary("native_sample");
mOpenCvCameraView.enableView();
}
break;
default: {
super.onManagerConnected(status);
}
break;
}
}
};
public Tutorial1Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.tutorial1_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial1_activity_native_surface_view);
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
mOpenCvCameraView.setCvCameraViewListener(this);
}
@Override
public void onPause() {
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public void onResume() {
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this,
mLoaderCallback);
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
Log.i(TAG, "called onCreateOptionsMenu");
// mItemSwitchCamera = menu.add("Toggle Native/Java camera");
return true;
}
@Override
public boolean onOptionsItemSelected(MenuItem item) {
String toastMesage = new String();
Log.i(TAG, "called onOptionsItemSelected; selected item: " + item);
if (item == mItemSwitchCamera) {
mOpenCvCameraView.setVisibility(SurfaceView.GONE);
mIsJavaCamera = !mIsJavaCamera;
if (mIsJavaCamera) {
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial1_activity_java_surface_view);
toastMesage = "Java Camera";
} else {
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial1_activity_native_surface_view);
toastMesage = "Native Camera";
}
mOpenCvCameraView.setVisibility(SurfaceView.VISIBLE);
mOpenCvCameraView.setCvCameraViewListener(this);
mOpenCvCameraView.enableView();
mOpenCvCameraView.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View v) {
takePicture();
}
});
Toast toast = Toast.makeText(this, toastMesage, Toast.LENGTH_LONG);
toast.show();
}
return true;
}
public void takePicture() {
if (imageTaken != null) {
Bitmap resultBitmap = null;
try {
// Imgproc.cvtColor(imageTaken, imageTaken,
// Imgproc.COLOR_BGR2GRAY);
// Imgproc.cvtColor(imageTaken, imageTaken,
// Imgproc.COLOR_GRAY2RGBA, 4);
/*
* Mat test =
* Imgproc.getPerspectiveTransform(ImageSrc,ImageDst);
* Imgproc.warpPerspective(ImageSrc, ImageDst, test,
* ImageDst.size());
*/
resultBitmap = Bitmap.createBitmap(imageTaken.cols(),
imageTaken.rows(), Bitmap.Config.ARGB_8888);
//
Utils.matToBitmap(imageTaken, resultBitmap);
byte[] sendData = codec(resultBitmap,
Bitmap.CompressFormat.JPEG, 50);
Intent i = new Intent(getApplicationContext(),
ShowImageActivity.class);
i.putExtra("data", sendData);
startActivity(i);
} catch (CvException e) {
// TODO: handle exception
e.printStackTrace();
}
}
}
private byte[] codec(Bitmap src, Bitmap.CompressFormat format, int quality) {
ByteArrayOutputStream os = new ByteArrayOutputStream();
src.compress(format, quality, os);
byte[] array = os.toByteArray();
System.out.println(array.length);
// return BitmapFactory.decodeByteArray(array, 0, array.length);
return array;
}
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat();
mGrayMat = new Mat();
imageTaken = new Mat();
}
public void onCameraViewStopped() {
mRgba.release();
mGrayMat.release();
imageTaken.release();
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
/*long start = System.currentTimeMillis();
Size originalSize = inputFrame.rgba().size();
Imgproc.resize(inputFrame.rgba(), mRgba, new Size(800, 480));
*/
// FindSquares(inputFrame.rgba().getNativeObjAddr(), 1);
// imageTaken = inputFrame.clone();
// System.out.println(inputFrame.rgba().type());
findSquare(inputFrame.rgba().getNativeObjAddr(), imageTaken.getNativeObjAddr(), 1);
// if (mDraw == 1) {
/*Imgproc.resize(mRgba, inputFrame.rgba(), originalSize);
// }
long end = System.currentTimeMillis();
Log.d("Frame time", "" + (end - start) + " ms");
*/
return inputFrame.rgba();
}
public native void FindFeatures(long matAddrGr, long matAddrRgba);
public native int FindSquares(long matAddrRgba, int draw);
public native void findSquare(long matAddrRgba, long matAddrDescriptor, int draw);
}
And this is my JNI code:
JNIEXPORT jint JNICALL Java_com_gconsent_opencv_MainActivity_findSquare(JNIEnv*,
jobject, jlong addrRgba, jlong addrDescriptor, jlong addrSrc, jlong addrDst, jint draw){
Mat& image = *(Mat*) addrRgba;
Mat& imageCropped = *(Mat*) addrDescriptor;
Mat& imageSrc = *(Mat*) addrSrc;
Mat& imageDst = *(Mat*) addrDst;
Mat newSrc = image.clone();
imageCropped = image.clone();
Mat testImage = image.clone();
// blur will enhance edge detection
Mat blurred(testImage);
medianBlur(testImage, blurred, 9);
Mat gray0(blurred.size(), CV_8U), gray;
vector < vector<Point> > contours;
// find squares in every color plane of the image
for (int c = 0; c < 3; c++) {
int ch[] = { c, 0 };
mixChannels(&blurred, 1, &gray0, 1, ch, 1);
// try several threshold levels
const int threshold_level = 2;
for (int l = 0; l < threshold_level; l++) {
// Use Canny instead of zero threshold level!
// Canny helps to catch squares with gradient shading
if (l == 0) {
Canny(gray0, gray, 10, 20, 3); //
// Dilate helps to remove potential holes between edge segments
dilate(gray, gray, Mat(), Point(-1, -1));
} else {
gray = gray0 >= (l + 1) * 255 / threshold_level;
}
// Find contours and store them in a list
findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);
// Test contours
vector < Point > approx;
for (size_t i = 0; i < contours.size(); i++) {
// approximate contour with accuracy proportional
// to the contour perimeter
approxPolyDP(Mat(contours[i]), approx,
arcLength(Mat(contours[i]), true) * 0.02, true);
// Note: absolute value of an area is used because
// area may be positive or negative - in accordance with the
// contour orientation
if (approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000
&& isContourConvex(Mat(approx))) {
double maxCosine = 0;
for (int j = 2; j < 5; j++) {
double cosine = fabs(
angle(approx[j % 4], approx[j - 2],
approx[j - 1]));
maxCosine = MAX(maxCosine, cosine);
}
if (maxCosine < 0.3) {
line(image, approx[0], approx[1],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[1], approx[2],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[2], approx[3],
Scalar(0, 255, 0, 255), 2, 4, 0);
line(image, approx[3], approx[0],
Scalar(0, 255, 0, 255), 2, 4, 0);
vector<Point2f> src(4);
// src.push_back(approx[0]);
// src.push_back(approx[1]);
// src.push_back(approx[2]);
// src.push_back(approx[3]);
src[0] = approx[0];
src[1] = approx[1];
src[2] = approx[2];
src[3] = approx[3];
cv::Mat quad = cv::Mat::zeros(300, 220, CV_8U);
// transformed quadrangle
vector < Point2f > quad_pts(4);
// Point2f quad_pts[4];
quad_pts.push_back(Point(0, 0));
quad_pts.push_back(Point(quad.cols, 0));
quad_pts.push_back(Point(quad.cols, quad.rows));
quad_pts.push_back(Point(0, quad.rows));
// quad_pts[0] = Point(0, 0);
// quad_pts[1] = Point(quad.cols, 0);
// quad_pts[2] = Point(quad.cols, quad.rows);
// quad_pts[3] = Point(0, quad.rows);
imageSrc = Mat(src);
imageDst = Mat(quad_pts);
Mat transmtx = getPerspectiveTransform(src, quad_pts);
warpPerspective(src, quad, transmtx, quad.size());
imageCropped = quad.clone();
}
}
}
}
}
// imageCropped = getPolygon(newSrc);
return 1;
}
Change the input cv::Mat type to CV_32FC2.
Also take a look at this.
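For reference, on the Java side the same CV_32FC2 requirement is met by MatOfPoint2f, which wraps a Point[] as a CV_32FC2 Mat with exactly the layout getPerspectiveTransform expects. A minimal Java sketch (hypothetical corner order and a hypothetical 220x300 output size, just to show the types):
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
public class PerspectiveSketch {
    // Sketch: corners assumed in top-left, top-right, bottom-right, bottom-left order.
    public static Mat cropQuad(Mat src, Point[] corners) {
        MatOfPoint2f srcPts = new MatOfPoint2f(corners);       // CV_32FC2, 4 points
        MatOfPoint2f dstPts = new MatOfPoint2f(
                new Point(0, 0), new Point(220, 0),
                new Point(220, 300), new Point(0, 300));        // CV_32FC2, 4 points
        Mat transform = Imgproc.getPerspectiveTransform(srcPts, dstPts);
        Mat warped = new Mat();
        // Warp the source image (not the point Mats) into the 220x300 output quad.
        Imgproc.warpPerspective(src, warped, transform, new Size(220, 300));
        return warped;
    }
}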