I have a problem with the drawing functions in my Android JNI app using OpenCV. I track objects in my native function and pass the vector of rectangles back to Java through a Mat; drawing those rectangles on the RGBA Mat in Java works fine. But now I want to draw the rectangles in native code, inside my function, before returning to Java, and it doesn't work. I have tried drawing anything at all from the JNI side, still with no effect: the drawing functions don't change the Mat that is returned to Java. I would be very grateful for help.
This is my native code:
JNIEXPORT void JNICALL Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect
(JNIEnv * jenv, jclass, jlong thiz, jlong imageGray, jlong faces)
{
    LOGD("Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect enter");
    try
    {
        vector<Rect> RectFaces;
        ((DetectionBasedTracker*)thiz)->process(*((Mat*)imageGray));
        ((DetectionBasedTracker*)thiz)->getObjects(RectFaces);

        for (size_t i = 0; i < RectFaces.size(); i++)
        {
            Point p, k;
            p.x = RectFaces[i].x;
            p.y = RectFaces[i].y;
            k.x = RectFaces[i].x + RectFaces[i].width;
            k.y = RectFaces[i].y + RectFaces[i].height;
            rectangle(*((Mat*)imageGray), p, k, Scalar(0, 255, 255, 255), -1, 8);
        }

        vector_Rect_to_Mat(RectFaces, *((Mat*)faces));
    }
    catch (cv::Exception& e)
    {
        LOGD("nativeCreateObject caught cv::Exception: %s", e.what());
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if (!je)
            je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        LOGD("nativeDetect caught unknown exception");
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI code {highgui::VideoCapture_n_1VideoCapture__()}");
    }
    LOGD("Java_org_opencv_samples_facedetect_DetectionBasedTracker_nativeDetect exit");
}
and the Java code that returns the RGBA Mat:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    //Imgproc.Canny(mGray, mCanny, CANNY_MIN_TRESHOLD, CANNY_MAX_TRESHOLD);
    Point center = new Point(mRgba.width() / 2, mRgba.height() / 2);

    //must be 1 channel, 8 bit!
    /*//do Hough transform to find lines
    double rho = 1;
    double theta = Math.PI / 180;
    Imgproc.HoughLinesP(mCanny, mLines, rho, theta, HOUGH_TRESHOLD, HOUGH_MIN_LINE_LENGTH, HOUGH_MAX_LINE_GAP);*/

    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }

    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2,
                    2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize),
                    new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
        //mNativeDetector.findLines(mGray, mCanny);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }

    /*Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++) {
        if (center.x > facesArray[i].tl().x
                && center.x < facesArray[i].br().x) {
            if (center.y > facesArray[i].tl().y
                    && center.y < facesArray[i].br().y) {
                Core.rectangle(mRgba, facesArray[i].tl(),
                        facesArray[i].br(), CAR_RECT_COLOR_RED, 3);
            } else {
                Core.rectangle(mRgba, facesArray[i].tl(),
                        facesArray[i].br(), CAR_RECT_COLOR_YELLOW, 3);
            }
        } else {
            Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
                    CAR_RECT_COLOR_YELLOW, 3);
        }
    }*/

    Point p1 = new Point(mRgba.width() / 2, 0);
    Point p2 = new Point(mRgba.width() / 2, mRgba.height());
    Point p3 = new Point(0, mRgba.height() / 2);
    Point p4 = new Point(mRgba.width(), mRgba.height() / 2);
    Core.line(mRgba, p3, p4, AXIS_COLOR);
    Core.line(mRgba, p1, p2, AXIS_COLOR);

    return mRgba;
    //return mLines;
}
Why are you passing a grayscale image to the function? If you want things to show up on the screen, you should pass the RGBA data. Your native code draws on imageGray, but onCameraFrame() returns mRgba, so anything drawn on the gray Mat is never displayed.
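A minimal sketch of a Java-side fix, keeping detection on the gray frame but drawing on the Mat that is actually returned (variable names taken from the question's code; this is an illustration, not the only option):

// Detect on the gray frame as before, but draw on mRgba,
// since mRgba is what onCameraFrame() returns to the screen.
MatOfRect faces = new MatOfRect();
if (mNativeDetector != null)
    mNativeDetector.detect(mGray, faces);
for (Rect r : faces.toArray()) {
    Core.rectangle(mRgba, r.tl(), r.br(), new Scalar(0, 255, 255, 255), -1);
}

Alternatively, extend the native signature with one more jlong parameter, pass mRgba.getNativeObjAddr() from Java, and call rectangle() on that Mat in the C++ code instead of on imageGray.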
I am trying to use the code from http://androiderstuffs.blogspot.com/2016/06/detecting-rectangle-using-opencv-java.html to detect a card. But instead of putting the card on a flat surface, I will be holding it in my hand, in front of my head. The problem is that the card rectangle is never detected. I am new to OpenCV. See my code below; it highlights all found rectangles in the output image, but it never finds the card rectangle.
private void findRectangleOpen(Bitmap image) throws Exception {
    Mat tempor = new Mat();
    Mat src = new Mat();
    Utils.bitmapToMat(image, tempor);
    Imgproc.cvtColor(tempor, src, Imgproc.COLOR_BGR2RGB);

    Mat blurred = src.clone();
    Imgproc.medianBlur(src, blurred, 9);

    Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();

    List<Mat> blurredChannel = new ArrayList<Mat>();
    blurredChannel.add(blurred);
    List<Mat> gray0Channel = new ArrayList<Mat>();
    gray0Channel.add(gray0);

    MatOfPoint2f approxCurve;
    int maxId = -1;

    for (int c = 0; c < 3; c++) {
        int ch[] = {c, 0};
        Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));

        int thresholdLevel = 1;
        for (int t = 0; t < thresholdLevel; t++) {
            if (t == 0) {
                Imgproc.Canny(gray0, gray, 10, 20, 3, true); // true ?
                Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
            } else {
                Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
                        Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
                        Imgproc.THRESH_BINARY,
                        (src.width() + src.height()) / 200, t);
            }

            Imgproc.findContours(gray, contours, new Mat(),
                    Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);

            int i = 0;
            for (MatOfPoint contour : contours) {
                MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
                double area = Imgproc.contourArea(contour);
                approxCurve = new MatOfPoint2f();
                Imgproc.approxPolyDP(temp, approxCurve,
                        Imgproc.arcLength(temp, true) * 0.02, true);

                if (approxCurve.total() == 4 && area >= 200 && area <= 40000) {
                    double maxCosine = 0;
                    List<Point> curves = approxCurve.toList();
                    for (int j = 2; j < 5; j++) {
                        double cosine = Math.abs(angle(curves.get(j % 4),
                                curves.get(j - 2), curves.get(j - 1)));
                        maxCosine = Math.max(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3) {
                        Imgproc.drawContours(src, contours, i, new Scalar(255, 0, 0), 3);
                        Bitmap bmp;
                        bmp = Bitmap.createBitmap(src.cols(), src.rows(),
                                Bitmap.Config.ARGB_8888);
                        Utils.matToBitmap(src, bmp);
                        ByteArrayOutputStream stream = new ByteArrayOutputStream();
                        bmp.compress(Bitmap.CompressFormat.PNG, 100, stream);
                        byte[] byteArray = stream.toByteArray();
                        //File origFile = getFileForSaving();
                        savePhoto(byteArray);
                        bmp.recycle();
                    }
                }
                i++;
            }
        }
    }
}
private static double angle(org.opencv.core.Point p1, org.opencv.core.Point p2, org.opencv.core.Point p0) {
    double dx1 = p1.x - p0.x;
    double dy1 = p1.y - p0.y;
    double dx2 = p2.x - p0.x;
    double dy2 = p2.y - p0.y;
    return (dx1 * dx2 + dy1 * dy2)
            / Math.sqrt((dx1 * dx1 + dy1 * dy1) * (dx2 * dx2 + dy2 * dy2)
            + 1e-10);
}
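For reference, angle() returns the cosine of the corner angle at p0 between the vectors p0→p1 and p0→p2:

cos θ = ((p1 − p0) · (p2 − p0)) / (|p1 − p0| |p2 − p0|)

so the maxCosine < 0.3 filter only accepts quadrilaterals whose four corners are all close to right angles (arccos 0.3 ≈ 72.5°, i.e. corners between roughly 73° and 107°). A card held at a steep perspective angle can fail this test, which may be part of why the hand-held card is missed.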
[Sample output image: the rectangles the code detects, but not the card]
Below is the code that draws a rectangle on the tracked face. I want to replace this rectangle with a custom overlay, such as a hat image. This code runs inside OpenCV's onCameraFrame method, and mRgba is the Mat holding the camera preview.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
    }

    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }

    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++) {
        Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
                FACE_RECT_COLOR, 3);
    }
    return mRgba;
}
[Screenshot: the current output, a rectangle drawn on the tracked face]
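One common approach, sketched below under some assumptions: mHat is a 4-channel RGBA Mat of the hat (e.g. a transparent PNG loaded once at startup, for instance via Utils.loadResource), and the hat is placed directly above each detected face using its alpha channel as a copy mask. Treat this as an illustration, not a drop-in replacement:

Rect[] facesArray = faces.toArray();
for (Rect face : facesArray) {
    // Size the hat to the face width, keeping the hat's aspect ratio.
    int hatW = face.width;
    int hatH = hatW * mHat.rows() / mHat.cols();
    int x = face.x;
    int y = Math.max(0, face.y - hatH);
    Rect hatRect = new Rect(x, y, hatW, Math.min(hatH, face.y));
    if (hatRect.height <= 0) continue; // no room above the face

    Mat hatResized = new Mat();
    Imgproc.resize(mHat, hatResized, new Size(hatRect.width, hatRect.height));

    // Copy only the opaque pixels, using the alpha channel as the mask.
    List<Mat> ch = new ArrayList<Mat>();
    Core.split(hatResized, ch);
    hatResized.copyTo(mRgba.submat(hatRect), ch.get(3));
}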
I am using the OpenCV edge-detection C++ code below:
JNIEXPORT jfloatArray JNICALL Java_com_test_getPoints
(JNIEnv *env, jobject thiz, jobject bitmap)
{
    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scanning getPoints");
    int ret;
    AndroidBitmapInfo info;
    void* pixels = 0;

    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "AndroidBitmap_getInfo() failed ! error=%d", ret);
        return 0;
    }
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Bitmap format is not RGBA_8888!");
        return 0;
    }
    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
        __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "AndroidBitmap_lockPixels() failed ! error=%d", ret);
    }

    Mat mbgra(info.height, info.width, CV_8UC4, pixels);

    vector<Point> img_pts = getPoints(mbgra);

    jfloatArray jArray = env->NewFloatArray(8);
    if (jArray != NULL)
    {
        jfloat *ptr = env->GetFloatArrayElements(jArray, NULL);
        for (int i = 0, j = i + 4; j < 8; i++, j++)
        {
            ptr[i] = img_pts[i].x;
            ptr[j] = img_pts[i].y;
        }
        env->ReleaseFloatArrayElements(jArray, ptr, NULL);
    }

    AndroidBitmap_unlockPixels(env, bitmap);
    return jArray;
}
vector<Point> getPoints(Mat image)
{
    int width = image.size().width;
    int height = image.size().height;
    Mat image_proc = image.clone();
    vector<vector<Point> > squares;

    Mat blurred(image_proc);
    medianBlur(image_proc, blurred, 9);

    Mat gray0(blurred.size(), CV_8U), gray;
    vector<vector<Point> > contours;

    for (int c = 0; c < 3; c++)
    {
        int ch[] = {c, 0};
        mixChannels(&blurred, 1, &gray0, 1, ch, 1);

        const int threshold_level = 2;
        for (int l = 0; l < threshold_level; l++)
        {
            if (l == 0)
            {
                Canny(gray0, gray, 10, 20, 3);
                dilate(gray, gray, Mat(), Point(-1, -1));
            }
            else
            {
                gray = gray0 >= (l + 1) * 255 / threshold_level;
            }

            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;
            for (size_t i = 0; i < contours.size(); i++)
            {
                approxPolyDP(Mat(contours[i]), approx, arcLength(Mat(contours[i]), true) * 0.02, true);

                if (approx.size() == 4 &&
                    fabs(contourArea(Mat(approx))) > 1000 &&
                    isContourConvex(Mat(approx)))
                {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++)
                    {
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }
                    if (maxCosine < 0.3)
                        squares.push_back(approx);
                }
            }
        }
    }

    double largest_area = -1;
    int largest_contour_index = 0;
    for (size_t i = 0; i < squares.size(); i++)
    {
        double a = contourArea(squares[i], false);
        if (a > largest_area)
        {
            largest_area = a;
            largest_contour_index = (int)i;
        }
    }

    __android_log_print(ANDROID_LOG_VERBOSE, APPNAME, "Scanning size() %d", (int)squares.size());

    vector<Point> points;
    if (squares.size() > 0)
    {
        points = squares[largest_contour_index];
    }
    else
    {
        points.push_back(Point(0, 0));
        points.push_back(Point(width, 0));
        points.push_back(Point(0, height));
        points.push_back(Point(width, height));
    }
    return points;
}
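For reference, a minimal sketch of the Java side that would consume this; the method declaration and PointF usage are assumptions inferred from the JNI name Java_com_test_getPoints:

// Declared in the class that the JNI name Java_com_test_getPoints maps to.
public native float[] getPoints(Bitmap bitmap);

// The native code packs the four corners as [x0..x3, y0..y3].
float[] pts = getPoints(bitmap);
if (pts != null && pts.length == 8) {
    for (int i = 0; i < 4; i++) {
        android.graphics.PointF corner = new android.graphics.PointF(pts[i], pts[i + 4]);
        // use the corners to draw the detected quadrilateral overlay
    }
}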
How can I find accurate edges, as in the screenshot above? I am new to OpenCV; can anyone guide me? Thanks for any response.
[Screenshots: the current output image and the required output]
Hello Android programmers, kindly check my code: I need to fill in the parameters for cropping the detected face image. I don't know what values to pass when cropping the detected face in Android using createBitmap(source, x, y, width, height). How can I get integer values for x and y to fill in the Bitmap function's parameters?
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    if (mAbsoluteFaceSize == 0) {
        int height = mGray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0) {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }

    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR) {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2, 2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    } else if (mDetectorType == NATIVE_DETECTOR) {
        if (mNativeDetector != null)
            mNativeDetector.detect(mGray, faces);
    } else {
        Log.e(TAG, "Detection method is not selected!");
    }

    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);

    //crop
    viewWidth = part1.getMeasuredWidth();
    viewHeight = part1.getMeasuredHeight();

    //create the bitmap
    bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
    Resizebmp = Bitmap.createBitmap(bmp, x, y, viewWidth, viewHeight);

    try {
        Utils.matToBitmap(mRgba, bmp);
    } catch (Exception e) {
        Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
        bmp.recycle();
        bmp = null;
    }
    return mRgba;
}
Correct me if I'm wrong: do you mean that you have detected the face in a large bitmap and want to crop the face out into a smaller bitmap? For example, you have a 640 x 480 bitmap and found the face inside a rectangle with upper-left corner x1, y1 and lower-right corner x2, y2. Then it should be something like:
resizeBmp = Bitmap.createBitmap(bmp, x1, y1, x2 - x1, y2 - y1);
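Tying that to the onCameraFrame() code above: the detected OpenCV Rect already carries x, y, width and height, so a sketch (an illustration only, assuming matToBitmap is called before cropping and the crop region is clamped to the bitmap bounds) could be:

Utils.matToBitmap(mRgba, bmp); // fill bmp first, then crop from it
if (facesArray.length > 0) {
    Rect face = facesArray[0];
    int x = Math.max(0, face.x);
    int y = Math.max(0, face.y);
    int w = Math.min(face.width, bmp.getWidth() - x);
    int h = Math.min(face.height, bmp.getHeight() - y);
    Resizebmp = Bitmap.createBitmap(bmp, x, y, w, h);
}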
I am trying to run OpenCV face detection on the byte[] data obtained from the onPreviewFrame() method of Camera.PreviewCallback. I managed to convert the data into a grayscale image using the code below.
Mat matNew = new Mat(pHeight, pWidth, CvType.CV_8U);
matNew.put(0, 0, data);
Mat matrgb = new Mat();
Imgproc.cvtColor(matNew, matrgb, Imgproc.COLOR_YUV420sp2RGB, 4);
Mat matgray = new Mat();
Imgproc.cvtColor(matrgb, matgray, Imgproc.COLOR_RGB2GRAY, 0);
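As a side note, the Y plane of the NV21/YUV420sp preview buffer is already the luminance (grayscale) image, so the RGB round trip can be skipped. A sketch, assuming the same data, pWidth and pHeight variables as above (note the Mat must be pHeight * 3 / 2 rows tall to hold the full YUV buffer):

// NV21 preview data: pWidth x (pHeight * 3/2) bytes, Y plane first.
Mat yuv = new Mat(pHeight + pHeight / 2, pWidth, CvType.CV_8UC1);
yuv.put(0, 0, data);
Mat matgray = new Mat();
Imgproc.cvtColor(yuv, matgray, Imgproc.COLOR_YUV420sp2GRAY);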
and I have set android:screenOrientation to "portrait" in the AndroidManifest file.
I am using the OpenCV JavaDetector:
mJavaDetector.detectMultiScale(matgray, faceDetected, 1.1, 3, 0,
        new org.opencv.core.Size(0, 0),
        new org.opencv.core.Size(matgray.width(), matgray.height()));
and I draw a rectangle over the detected faces using this:
for (Rect rect : faceDetected.toArray()) {
    Core.rectangle(matgray, new Point(rect.x, rect.y),
            new Point(rect.x + rect.width, rect.y + rect.height),
            new Scalar(0, 255, 0));
}
However, face detection on the resulting grayscale Mat only works when I hold my Android phone in landscape orientation; it does not work in portrait. The transpose and flip suggested in this post, How to detect face by portrait mode?, don't seem to work. Is there any way to overcome this issue? I have used Android's FaceDetectionListener, which has no problem detecting faces in portrait mode, but its capabilities are limited compared to OpenCV. Any help would be greatly appreciated. Thanks.
Something like the following works for me with android:screenOrientation="portrait" in AndroidManifest.xml:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();

    MatOfRect faces = new MatOfRect();

    // transpose + flip rotates the frame 90 degrees, so the detector
    // sees upright faces when the device is held in portrait
    Core.flip(mRgba.t(), mRgba, -1);
    Core.flip(mGray.t(), mGray, -1);

    if (mNativeDetector != null)
        mNativeDetector.detect(mGray, faces);

    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
        Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 2);

    return mRgba;
}
Flip the colored and gray images (Mats) clockwise so that face/feature detection works in portrait mode. At the end of the feature-detection logic, flip the colored image (the mRgba Mat) counter-clockwise, as illustrated:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    Core.flip(inputFrame.gray().t(), mGray, 1);  //rotate clockwise
    Core.flip(inputFrame.rgba().t(), mRgba, 1);

    mRgba = Feature_DetectionNATIVE(mRgba, mGray);

    Core.flip(mRgba.t(), mRgba, 0);  //rotate counter-clockwise
    //this is a solution for allowing face detection in portrait view if it isn't working at all
    return mRgba;
}
public Mat Feature_DetectionNATIVE(Mat mRgba2, final Mat Gray)
{
    if (mAbsoluteFaceSize == 0)
    {
        int height = Gray.rows();
        if (Math.round(height * mRelativeFaceSize) > 0)
        {
            mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
        }
        mNativeDetector.setMinFaceSize(mAbsoluteFaceSize);
    }

    MatOfRect faces = new MatOfRect();
    if (mDetectorType == JAVA_DETECTOR)
    {
        if (mJavaDetector != null)
            mJavaDetector.detectMultiScale(Gray, faces, 1.1, 2, 2,
                    new Size(mAbsoluteFaceSize, mAbsoluteFaceSize), new Size());
    }
    else if (mDetectorType == NATIVE_DETECTOR)
    {
        if (mNativeDetector != null)
            mNativeDetector.detect(Gray, faces);
    }

    Rect[] facesArray = faces.toArray();
    for (int i = 0; i < facesArray.length; i++)
    {
        Core.rectangle(mRgba2, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
    }
    return mRgba2;
}
After that, the camera will still show face detection in landscape orientation. To fix this, rotate the canvas clockwise by 90 degrees in OpenCV's CameraBridgeViewBase class, or hack around it. (Note: this reduces FPS, but face detection is still fast.)
protected void deliverAndDrawFrame(CvCameraViewFrame frame) {
    Mat modified;
    if (mListener != null) {
        modified = mListener.onCameraFrame(frame);
    } else {
        modified = frame.rgba();
    }

    boolean bmpValid = true;
    if (modified != null) {
        try {
            Utils.matToBitmap(modified, mCacheBitmap);
        } catch (Exception e) {
            Log.e(TAG, "Mat type: " + modified);
            Log.e(TAG, "Bitmap type: " + mCacheBitmap.getWidth() + "*" + mCacheBitmap.getHeight());
            Log.e(TAG, "Utils.matToBitmap() throws an exception: " + e.getMessage());
            bmpValid = false;
        }
    }

    if (bmpValid && mCacheBitmap != null) {
        Canvas canvas = getHolder().lockCanvas();
        if (canvas != null) {
            canvas.drawColor(0, android.graphics.PorterDuff.Mode.CLEAR);
            Log.d(TAG, "mStretch value: " + mScale);
            canvas = rotateCanvas(canvas, mCacheBitmap);
            getHolder().unlockCanvasAndPost(canvas);
        }
    }
}
protected Canvas rotateCanvas(final Canvas canvas, final Bitmap mCacheBitmap)
{
    final CountDownLatch latch = new CountDownLatch(1);
    final Mat[] mRgba = new Mat[1];
    new Thread(new Runnable() {
        @Override
        public void run() {
            try {
                Bitmap bitmap = Bitmap.createScaledBitmap(mCacheBitmap, canvas.getHeight(), canvas.getWidth(), true);
                canvas.rotate(90, 0, 0);
                mScale = canvas.getWidth() / (float) bitmap.getHeight();
                float scale2 = canvas.getHeight() / (float) bitmap.getWidth();
                if (scale2 > mScale) {
                    mScale = scale2;
                }
                if (mScale != 0) {
                    canvas.scale(mScale, mScale, 0, 0);
                }
                canvas.drawBitmap(bitmap, 0, -bitmap.getHeight(), null);
            } catch (CvException e) {
                e.printStackTrace();
            }
            latch.countDown(); //release await() below
        }
    }).start();

    try {
        latch.await(); //wait for countDown in the thread in order to obtain a value from it
    } catch (InterruptedException e) {
        e.printStackTrace();
    }
    return canvas;
}
This solution works when one is implementing OpenCV's CameraBridgeViewBase class (tested on OpenCV 2.4.9).