It seems that OpenCV can't use the native camera on Android 5.+ (Lollipop).
cf:
http://code.opencv.org/issues/4185
Is there another way to grab pictures from a native activity and then convert them into a cv::Mat?
Or maybe I could use JNI to call a grab function in Java from my C++ activity?
Thank you for your help
Charles
You could use JNI to call a grab function in Java from your C++ activity, like this (threshold example):
Java code:
// Override of the OpenCV JavaCameraView camera-frame callback
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    mGray = inputFrame.gray();
    ProcessImage.threshold(mGray, mGray, 97, 0);
    return mGray;
}

// Your functions
public static void threshold(Mat srcGray, Mat dst, int thresholdValue, int thresholdType) {
    // thresholdType is already an int, so it is passed through as-is
    nativeThreshold(srcGray.getNativeObjAddr(), dst.getNativeObjAddr(), thresholdValue, thresholdType);
}

private static native void nativeThreshold(long srcGray, long dst, int thresholdValue, int thresholdType);
JNI C++ code:
JNIEXPORT void JNICALL Java_{package}_nativeThreshold
  (JNIEnv* jenv, jobject jobj, jlong srcGray, jlong dst, jint thresholdValue, jint thresholdType)
{
    try
    {
        Mat& matDst = *((Mat*)dst);
        Mat& matSrcGray = *((Mat*)srcGray);
        threshold(matSrcGray, matDst, thresholdValue, 255 /* max value for THRESH_BINARY */, thresholdType);
    }
    catch (cv::Exception& e)
    {
        LOGD("nativeThreshold caught cv::Exception: %s", e.what());
        jclass je = jenv->FindClass("org/opencv/core/CvException");
        if (!je)
            je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, e.what());
    }
    catch (...)
    {
        LOGD("nativeThreshold caught unknown exception");
        jclass je = jenv->FindClass("java/lang/Exception");
        jenv->ThrowNew(je, "Unknown exception in JNI code ProcessImage.nativeThreshold()");
    }
}
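For this to build as a standalone .cpp file you also need a few supporting pieces the snippet leaves out. A minimal sketch, assuming the module is linked against Android's log library (-llog) and the OpenCV headers are on the include path; the log tag is only an example:

#include <jni.h>
#include <android/log.h>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;

// Logcat macro used by the snippet above; "ProcessImage" is just an example tag
#define LOGD(...) __android_log_print(ANDROID_LOG_DEBUG, "ProcessImage", __VA_ARGS__)

In a .cpp file the JNI function itself should also be wrapped in extern "C" { ... } (or declared in the javah-generated header) so its symbol name is not C++-mangled.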
Hope this helps!
I want to use the SWT (Stroke Width Transform) algorithm to improve Tesseract-OCR accuracy. I referred to this link.
I found ccv and a way to use it.
I want to use the ccv_swt method, but I don't know exactly how to use it.
Especially the ccv_swt_param_t params part.
This is my code.
MainActivity.java
private Mat matInput;
private Mat matGray = new Mat();
private Mat matCanny = new Mat();
private Mat matBlur = new Mat();

public native void ocrTest(long matAddrGray, long matAddrCanny, long matAddrBlur);

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    matInput = inputFrame.rgba();
    Imgproc.cvtColor(matInput, matGray, Imgproc.COLOR_RGB2GRAY);
    ocrTest(matGray.getNativeObjAddr(), matCanny.getNativeObjAddr(), matBlur.getNativeObjAddr());
    return matInput;
}
native-lib.cpp
extern "C"
JNIEXPORT void JNICALL
Java_com_example_ocrtest_MainActivity_ocrTest(JNIEnv *env, jobject instance, jlong matAddrGray,
jlong matAddrCanny, jlong matAddrBlur) {
// TODO
Mat &matGray = *(Mat *) matAddrGray;
Mat &matCanny = *(Mat *) matAddrCanny;
Mat &matBlur = *(Mat *) matAddrBlur;
int lowThreshold = 50;
int highThreshold = 150;
Canny(matGray, matCanny, lowThreshold, highThreshold, 3);
blur(matGray, matBlur, Size(3, 3));
}
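For the ccv_swt part itself, below is a rough sketch along the lines of ccv's own swtdetect sample: wrap the grayscale pixels in a ccv_dense_matrix_t, start from ccv_swt_default_params, and read back the detected word rectangles. This is an assumption-heavy sketch, not tested code; the raw-read flags and the ccv_swt_param_t field names should be verified against the ccv headers you actually build with.

// Sketch only (unverified against a real ccv build): detect word boxes with SWT
extern "C" {
#include <ccv.h>
}
#include <opencv2/core/core.hpp>
#include <vector>

std::vector<cv::Rect> detectWordsWithSwt(const cv::Mat& gray) {
    std::vector<cv::Rect> words;
    ccv_dense_matrix_t* image = 0;
    // Wrap the 8-bit grayscale pixels (assumed raw-read form of ccv_read)
    ccv_read(gray.data, &image, CCV_IO_GRAY_RAW | CCV_IO_NO_COPY,
             gray.rows, gray.cols, (int)gray.step);
    // Start from the library defaults; individual fields of ccv_swt_param_t
    // (for example the Canny thresholds used inside SWT) can be adjusted here
    ccv_swt_param_t params = ccv_swt_default_params;
    ccv_array_t* rects = ccv_swt_detect_words(image, params);
    if (rects) {
        for (int i = 0; i < rects->rnum; i++) {
            ccv_rect_t* r = (ccv_rect_t*)ccv_array_get(rects, i);
            words.push_back(cv::Rect(r->x, r->y, r->width, r->height));
        }
        ccv_array_free(rects);
    }
    ccv_matrix_free(image);
    return words;
}

The matGray produced in ocrTest() above could be passed straight into a helper like this; the resulting rectangles are the candidate text regions to hand to Tesseract.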
I am trying to detect facial landmarks using the OpenCV and dlib libraries in Android Studio.
I can capture an image and detect landmarks from it.
But I am facing a problem when I try to detect facial landmarks in real time,
because I am deserializing the shape_predictor_68_face_landmarks.dat file for each frame.
onCameraFrame method of MainActivity.java
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame)
{
    matInput = inputFrame.rgba();
    NativeClass.LandmarkDetection(matInput.getNativeObjAddr(), matOutput.getNativeObjAddr());
    return matOutput;
}
NativeClass.java
package com.example.user.drowsinessdetection;

public class NativeClass {
    public native static String getMessage();
    public native static void LandmarkDetection(long addrInput, long addrOutput);
}
The cpp file for Landmark detection:
com_example_user_drowsinessdetection_NativeClass.cpp
#include <com_example_user_drowsinessdetection_NativeClass.h>
#include <dlib/geometry/rectangle.h>
#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>
#include <dlib/opencv.h>
#include <opencv2/imgproc/imgproc.hpp>

using namespace cv;
using namespace dlib;
using namespace std;

// Helpers defined further down in this file
void faceDetectionDlib(Mat& img, Mat& dst);
void renderToMat(std::vector<full_object_detection>& dets, Mat& dst);

JNIEXPORT jstring JNICALL
Java_com_example_user_drowsinessdetection_NativeClass_getMessage
  (JNIEnv *env, jclass) {
    return env->NewStringUTF("JNI message");
}

JNIEXPORT void JNICALL
Java_com_example_user_drowsinessdetection_NativeClass_LandmarkDetection
  (JNIEnv *env, jclass thiz, jlong addrInput, jlong addrOutput) {
    Mat& image = *(Mat*)addrInput;
    Mat& dst = *(Mat*)addrOutput;
    faceDetectionDlib(image, dst);
}
void faceDetectionDlib(Mat& img, Mat& dst) {
    try {
        // Both of these run for every frame -- the construction and the
        // deserialization are exactly what the question below asks to avoid repeating
        frontal_face_detector detector = get_frontal_face_detector();
        shape_predictor pose_model;
        deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat") >> pose_model;

        cv_image<bgr_pixel> cimg(img);
        std::vector<dlib::rectangle> faces = detector(cimg);
        std::vector<full_object_detection> shapes;
        size_t k = faces.size();
        for (unsigned long i = 0; i < k; ++i)
            shapes.push_back(pose_model(cimg, faces[i]));

        dst = img.clone();
        renderToMat(shapes, dst);
    }
    catch (serialization_error& e) {
        cout << endl << e.what() << endl;
    }
}
void renderToMat(std::vector<full_object_detection>& dets, Mat& dst) {
    Scalar color = Scalar(0, 255, 0);
    int sz = 3;
    // chin line
    size_t l = dets.size();
    for (unsigned long idx = 0; idx < l; idx++) {
        // left eye
        for (unsigned long i = 37; i <= 41; ++i)
            cv::line(dst, Point(dets[idx].part(i).x(), dets[idx].part(i).y()),
                     Point(dets[idx].part(i - 1).x(), dets[idx].part(i - 1).y()), color, sz);
        cv::line(dst, Point(dets[idx].part(36).x(), dets[idx].part(36).y()),
                 Point(dets[idx].part(41).x(), dets[idx].part(41).y()), color, sz);
        // right eye
        for (unsigned long i = 43; i <= 47; ++i)
            cv::line(dst, Point(dets[idx].part(i).x(), dets[idx].part(i).y()),
                     Point(dets[idx].part(i - 1).x(), dets[idx].part(i - 1).y()), color, sz);
        cv::line(dst, Point(dets[idx].part(42).x(), dets[idx].part(42).y()),
                 Point(dets[idx].part(47).x(), dets[idx].part(47).y()), color, sz);
        // lips out part
    }
}
I don't want to execute this line
frontal_face_detector detector = get_frontal_face_detector();
and
deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat")>>pose_model;
for each and every frame.
So how can I do these only once and then use them for all other frames?
I am a newbie.
Please help.
Sorry for my poor English. :)
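A minimal sketch of one common way to do this, assuming the model path stays fixed: keep the detector and the shape predictor in function-local statics, so they are constructed and deserialized only on the first call and then reused for every later frame.

#include <dlib/image_processing/frontal_face_detector.h>
#include <dlib/image_processing.h>

// Created exactly once, on first use; later calls return the cached objects
static dlib::frontal_face_detector& getDetector() {
    static dlib::frontal_face_detector detector = dlib::get_frontal_face_detector();
    return detector;
}

static dlib::shape_predictor& getPoseModel() {
    static dlib::shape_predictor pose_model = [] {
        dlib::shape_predictor sp;
        // same path string as in the question
        dlib::deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat") >> sp;
        return sp;
    }();
    return pose_model;
}

// faceDetectionDlib() can then call getDetector() / getPoseModel() instead of
// constructing the detector and deserializing the model on every frame.

Another option is a separate native init method, called once from onCreate(), that loads the model into globals; either way the expensive deserialize runs a single time.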
I am using opencv-sdk-android.
I want my native code to return a keypoint vector. Is it correct to use code like this?
Vector<KeyPoint> keypoint = FindFeatures(Gray1.getNativeObjAddr(),descriptor.getNativeObjAddr());
and
public native Vector<KeyPoint> FindFeatures(long matAddrGr1, long matAddrGr2);
My native code is:
extern "C" {
JNIEXPORT Vector<KeyPoint> JNICALL Java_com_example_xyz_MainActivity_FindFeatures(JNIEnv*, jobject, jlong addrGray1, jlong addrdescrptor);
JNIEXPORT Vector<KeyPoint> JNICALL Java_com_example_xyz_MainActivity_FindFeatures(JNIEnv*, jobject, jlong addrGray1, jlong addrdescrptor)
{
Mat& mGr1 = *(Mat*)addrGray1;
Mat& descriptors_1 = *(Mat*)addrdescrptor;
vector<KeyPoint> keypoint_1;
//Do some processing here..
return keypoint_1;
}
}
If not, please suggest an alternative way to achieve it. I am new to OpenCV.
I had the same problem and I solved it with this piece of code.
First of all, in the Java code I've declared the function FindFeatures like this:
public native KeyPoint[] FindFeatures(long matAddrGr1, long matAddrGr2);
And my native code is:
JNIEXPORT jobjectArray JNICALL Java_com_example_mipatternrecognition_Reconocimiento_FindFeatures(
        JNIEnv* env, jobject, jlong matAddrGr1, jlong matAddrGr2) {
    Mat& mGr = *(Mat*) matAddrGr1;
    Mat& mRgb = *(Mat*) matAddrGr2;
    vector<KeyPoint> keyPoints_1;
    // Do some processing...

    // Get a class reference for org/opencv/features2d/KeyPoint
    jclass cls = env->FindClass("org/opencv/features2d/KeyPoint");
    // Get the method ID of the constructor (float, float, float, float, float, int, int)
    jmethodID midInit = env->GetMethodID(cls, "<init>", "(FFFFFII)V");
    // Allocate the result array, then construct one Java KeyPoint per native keypoint
    jobjectArray newKeyPointArr = env->NewObjectArray(keyPoints_1.size(), cls, NULL);
    for (unsigned int i = 0; i < keyPoints_1.size(); i++) {
        jobject newKeyPoint = env->NewObject(cls, midInit, keyPoints_1[i].pt.x,
                keyPoints_1[i].pt.y, keyPoints_1[i].size, keyPoints_1[i].angle,
                keyPoints_1[i].response, keyPoints_1[i].octave,
                keyPoints_1[i].class_id);
        env->SetObjectArrayElement(newKeyPointArr, i, newKeyPoint);
    }
    return newKeyPointArr;
}
I hope it helps you...
I want to use native memory instead of the Java heap for the camera buffer passed to camera.addCallbackBuffer().
I wrote some code, but it's wrong: I get a null array.
How do I do it properly?
Java PART
buflen = allocBuffer(1280, 720);
byte[] x = getBuffern(0, 1280, 720); // then I want to use this for addCallbackBuffer()
freeBuffer();
NDK PART
signed char * yuvm;

size_t getTotalSystemMemory()
{
    long pages = sysconf(_SC_PHYS_PAGES);
    long page_size = sysconf(_SC_PAGE_SIZE);
    return pages * page_size;
}

jint Java_com_example_testdvr_mycamview_allocBuffer(JNIEnv * env, jclass obj, jint width, jint height){
    jint bufLength = getTotalSystemMemory() / 4;
    bufLength = bufLength / (width * height * 1.5);
    yuvm = (signed char*) malloc(bufLength * width * height * 1.5);
    return bufLength;
}

void Java_com_example_testdvr_mycamview_freeBuffer(JNIEnv * env, jclass obj){
    if (sizeof(yuvm) != 0){
        free(yuvm);
    }
}

jbyteArray Java_com_example_testdvr_mycamview_getBuffern(JNIEnv * env, jclass obj, jint numbuf, jint width, jint height){
    jbyteArray res;
    env->SetByteArrayRegion(res, width * height * 1.5 * numbuf, width * height * 1.5, yuvm);
    return res;
}
I also have an idea to use the camera object from the NDK to supply native memory for the buffer,
like this:
JNIEXPORT void JNICALL Java_com_test_jnicall_ld(
        JNIEnv *env,
        jclass clazs,
        jobject camera) {
    jclass clazz = env->GetObjectClass(camera);
    jmethodID voidVoidMethod = env->GetMethodID(clazz, "addCallbackBuffer", "([B)V");
    jbyteArray* b = malloc(640 * 480 * 3 / 2);
    env->CallVoidMethod(camera, voidVoidMethod, b);
}
But I have a problem:
09-18 13:24:22.982: W/dalvikvm(27090): Invalid indirect reference 0x783f8008 in decodeIndirectRef
09-18 13:24:22.982: E/dalvikvm(27090): VM aborting
09-18 13:24:22.982: A/libc(27090): Fatal signal 6 (SIGABRT) at 0x000069d2 (code=-6), thread 27123 (Thread-4070)
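A minimal sketch of what getBuffern could look like instead, assuming the goal is to copy one frame-sized slice of the native buffer into a freshly allocated Java byte[]. The original calls SetByteArrayRegion on an uninitialized jbyteArray and passes the native-buffer offset as the start index inside the Java array:

#include <jni.h>
#include <cstdlib>

// Native backing store filled in by allocBuffer(), as in the snippet above
static signed char* yuvm = NULL;

extern "C" JNIEXPORT jbyteArray JNICALL
Java_com_example_testdvr_mycamview_getBuffern(JNIEnv* env, jclass obj,
                                              jint numbuf, jint width, jint height) {
    const jsize frameSize = (jsize)(width * height * 3 / 2);  // one NV21 frame
    if (yuvm == NULL)
        return NULL;                                 // allocBuffer() was never called
    jbyteArray res = env->NewByteArray(frameSize);   // allocate the Java array first
    if (res == NULL)
        return NULL;                                 // OutOfMemoryError already thrown
    // The second argument of SetByteArrayRegion is the start index inside the
    // *Java* array; the offset into the native buffer belongs on the source pointer
    env->SetByteArrayRegion(res, 0, frameSize,
                            (const jbyte*)(yuvm + (size_t)frameSize * numbuf));
    return res;
}

Note that addCallbackBuffer() only accepts a real Java byte[] (the copy above still lives on the Java heap), so handing it a raw malloc pointer cast to jbyteArray, as in the second snippet, is what triggers the "Invalid indirect reference" abort in the log.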
I have an OpenCV Android app. Most of its code is in Java but I have
one function that is in C.
The function gets a Mat object and returns a new one.
My question is how do I return a Mat from the native code to Java?
I couldn't find any example of that.
Thanks.
Today I had to return a Mat from native code. I started with "Tutorial 2 Advanced - 2. Mix Java+Native OpenCV", which already passes two Mat objects (images captured from the camera) to the native code.
But I wanted to return extracted features, so I added jlong addrDescriptor to the signature:
extern "C" {
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial4_Sample4View_FindFeatures(JNIEnv* env, jobject thiz, jlong addrGray, jlong addrRgba, jlong addrDescriptor)
{
Mat* pMatGr=(Mat*)addrGray;
Mat* pMatRgb=(Mat*)addrRgba;
Mat* pMatDesc=(Mat*)addrDescriptor;
vector<KeyPoint> v;
//OrbFeatureDetector detector(50);
OrbFeatureDetector detector;
OrbDescriptorExtractor extractor;
detector.detect(*pMatGr, v);
extractor.compute( *pMatGr, v, *pMatDesc );
circle(*pMatRgb, Point(100,100), 10, Scalar(5,128,255,255));
for( size_t i = 0; i < v.size(); i++ ) {
circle(*pMatRgb, Point(v[i].pt.x, v[i].pt.y), 10, Scalar(255,128,0,255));
}
}
}
In the Java part I added the Mat:
private Mat descriptor;
descriptor = new Mat();
The method getNativeObjAddr() does the trick. The Mat is allocated in Java and its address is passed to the native code, so there isn't any explicit returning.
FindFeatures(mGraySubmat.getNativeObjAddr(), mRgba.getNativeObjAddr(), descriptor.getNativeObjAddr());
Log.i("desc:" , descriptor.dump());
The Mat was filled with the required data and is directly accessible in the Java code after the JNI invocation returns.
Somewhere else in the code the Mat is released:
if (descriptor != null) {
    descriptor.release();
    descriptor = null;
}
In C++:
jlong funC() {
    Mat *mat = new Mat();
    // ...
    return (jlong)mat;
}
In Java:
long addr = funC(); // addr is the value returned from the C++ method funC()
Mat mat = new Mat(addr);
Attention: you must allocate with new Mat() on the C++ side. If you write a local object instead (Mat mat;), its memory is reclaimed when funC() ends and the address returned to Java is dangling.
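To spell that out, a small sketch (funC_wrong and funC_ok are just illustrative names):

#include <opencv2/core/core.hpp>
#include <jni.h>
using namespace cv;

// WRONG: 'local' is a stack object and is destroyed when the function returns,
// so the address handed back to Java would be dangling
jlong funC_wrong() {
    Mat local;
    return (jlong)&local;
}

// OK: the heap-allocated Mat outlives the call. The Java-side wrapper created
// with new Mat(addr) takes it over and, in the OpenCV Java bindings, its
// finalizer deletes the native object, so no explicit delete is needed here
jlong funC_ok() {
    Mat* mat = new Mat();
    return (jlong)mat;
}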