I have a huge problem with OpenCV 3.1.0 under Android. I am developing an app which does template matching on a camera preview.
My first approach was to use the OpenCV Java wrapper, which worked okay: one processing cycle took about 3.6s. To speed this up I reimplemented the code in C++. For some reason, since then the execution of one cycle has taken up to 35s.
Trying to speed this up further and leverage multithreading, I moved the JNI execution to an AsyncTask. Since then, a single execution takes up to 65s.
I am using the Gradle experimental plugin 0.7.0, which is considered stable, and the most recent NDK (12.1 as of now).
Here's my module build.gradle:
ndk {
    moduleName "OpenCVWrapper"
    ldLibs.addAll(["android", "log", "z"])
    cppFlags.add("-std=c++11")
    cppFlags.add("-fexceptions")
    cppFlags.add("-I" + file("src/main/jni").absolutePath)
    cppFlags.add("-I" + file("src/main/jni/opencv2").absolutePath)
    cppFlags.add("-I" + file("src/main/jni/opencv").absolutePath)
    stl = "gnustl_shared"
    debuggable = "true"
}
productFlavors {
    create("arm") {
        ndk.with {
            abiFilters.add("armeabi")
            String libsDir = file('../openCVLibrary310/src/main/jniLibs/armeabi/').absolutePath + '/'
            ldLibs.add(libsDir + "libopencv_core.a")
            ldLibs.add(libsDir + "libopencv_highgui.a")
            ldLibs.add(libsDir + "libopencv_imgproc.a")
            ldLibs.add(libsDir + "libopencv_java3.so")
            ldLibs.add(libsDir + "libopencv_ml.a")
        }
    }
    create("armv7") {
        ndk.with {
            abiFilters.add("armeabi-v7a")
            String libsDir = file('../openCVLibrary310/src/main/jniLibs/armeabi-v7a/').absolutePath + '/'
            ldLibs.add(libsDir + "libopencv_core.a")
            [... and so on ...]
So here's the Android Java code, which executes in about 3-4 seconds:
// data is byte[] from camera
Mat yuv = new Mat(height + height / 2, width, CvType.CV_8UC1);
yuv.put(0, 0, data);
Mat input = new Mat(height, width, CvType.CV_8UC3);
Imgproc.cvtColor(yuv, input, Imgproc.COLOR_YUV2RGB_NV12, 3);
yuv.release();

int midPoint = Math.min(input.cols(), input.rows()) / 2;
Mat rotated = new Mat();
Imgproc.warpAffine(input, rotated,
        Imgproc.getRotationMatrix2D(new Point(midPoint, midPoint), 270, 1.0),
        new Size(input.rows(), input.cols()));
input.release();

android.util.Size packageRect = midRect.getSize();
Rect r = new Rect(((rotated.cols() / 2) - (packageRect.getWidth() / 2)),
        ((rotated.rows() / 2) - (packageRect.getHeight() / 2)),
        packageRect.getWidth(), packageRect.getHeight());
Mat cut = new Mat(rotated, r);
Mat scaled = new Mat();
Imgproc.resize(cut, scaled, new Size(323, 339), 0, 0, Imgproc.INTER_AREA);
Imgcodecs.imwrite(getExternalFileName("cutout").getAbsolutePath(), cut);
cut.release();

Mat output = new Mat();
Imgproc.matchTemplate(pattern, scaled, output, Imgproc.TM_CCOEFF_NORMED);
Core.MinMaxLocResult tmplResult = Core.minMaxLoc(output);
findPackage(tmplResult.maxLoc.x + 150);
scaled.release();
output.release();
In turn, here is the C++ code doing exactly the same:
JNIEXPORT void JNICALL Java_at_identum_planogramscanner_ScanActivity_scanPackage(
        JNIEnv *env, jobject instance, jbyteArray input_, jobject data, jlong output,
        jint width, jint height, jint rectWidth, jint rectHeight) {
    jbyte *input = env->GetByteArrayElements(input_, NULL);

    jclass resultDataClass = env->GetObjectClass(data);
    jmethodID setResultMaxXPos = env->GetMethodID(resultDataClass, "setMaxXPos", "(I)V");
    jmethodID setResultMinXPos = env->GetMethodID(resultDataClass, "setMinXPos", "(I)V");
    jmethodID setResultMinVal = env->GetMethodID(resultDataClass, "setMinVal", "(F)V");
    jmethodID setResultMaxVal = env->GetMethodID(resultDataClass, "setMaxVal", "(F)V");
    LOGE("Before work");

    Mat convert(height + height / 2, width, CV_8UC1, (unsigned char *) input);
    Mat img(height, width, CV_8UC3);
    cvtColor(convert, img, CV_YUV2RGB_NV12, 3);
    convert.release();
    LOGE("After Colorconvert");

    int midCoord = min(img.cols, img.rows) / 2;
    Mat rot;
    Mat rotMat = getRotationMatrix2D(Point2f(midCoord, midCoord), 270, 1.0);
    warpAffine(img, rot, rotMat, Size(img.rows, img.cols));
    rotMat.release();
    LOGE("After Rotation");

    Rect r((rot.cols / 2 - rectWidth / 2),
           (rot.rows / 2 - rectHeight / 2),
           rectWidth, rectHeight);
    Mat cut(rot, r);
    rot.release();
    LOGE("After Cutting");

    Mat scaled(Size(323, 339), CV_8UC3);
    resize(cut, scaled, Size(323, 339), 0, 0, INTER_AREA);
    cut.release();
    LOGE("After Scaling");

    Mat match(pattern.cols, 1, CV_8UC1);
    matchTemplate(pattern, scaled, match, TM_SQDIFF_NORMED);
    scaled.release();
    LOGE("After Templatematching and normalize");

    double minVal; double maxVal; Point minLoc; Point maxLoc;
    minMaxLoc(match, &minVal, &maxVal, &minLoc, &maxLoc, Mat());
    img.release();

    env->CallVoidMethod(data, setResultMinXPos, minLoc.x);
    env->CallVoidMethod(data, setResultMaxXPos, maxLoc.x);
    env->CallVoidMethod(data, setResultMinVal, (jfloat) minVal); // narrow to match the (F)V setter
    env->CallVoidMethod(data, setResultMaxVal, (jfloat) maxVal);
    LOGE("After Calling JNI funcs");

    env->ReleaseByteArrayElements(input_, input, 0);
}
As you can see, it is practically exactly the same work, and I expected it to run a little faster than the Android Java version, but certainly not 10 times slower, and definitely not 20 times slower when run from an AsyncTask.
My best conclusion is that the OpenCV .a archives need some kind of compiler settings to speed up as much as possible. I hope someone can point me in the right direction!
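For reference, this is how such flags would be added with the experimental plugin, next to the existing cppFlags above. These values are guesses on my part, not a verified fix:

ndk {
    cppFlags.add("-O3")       // hypothetical: enable heavy optimization
    cppFlags.add("-DNDEBUG")  // hypothetical: drop assert overhead
    debuggable = false        // debuggable builds usually disable optimizations
}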
Thanks in advance!
I recently did a real-time face recognition application using OpenCV's Java wrapper, and like you I wanted to squeeze more performance out of it, so I implemented a JNI version. Again like your case, the JNI version turned out to be slower than the Java wrapper version, albeit just a little.
In your case I can see why the performance suddenly suffered; it happens here:
jbyte *input = env->GetByteArrayElements(input_, NULL);
As you can read online, this is slow because GetByteArrayElements generally copies the array from Java to C++. Depending on the camera preview size, that copy can be very significant, especially for real-time processing.
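As a quick check, you can pass a jboolean out-parameter to GetByteArrayElements to see whether the VM actually copied the buffer. A minimal sketch, reusing the input_ array from your function (and assuming your LOGE macro forwards printf-style arguments):

jboolean isCopy = JNI_FALSE;
jbyte *input = env->GetByteArrayElements(input_, &isCopy);
if (isCopy == JNI_TRUE) {
    // the VM duplicated the whole preview buffer just for this call
    LOGE("Preview buffer of %d bytes was copied", env->GetArrayLength(input_));
}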
Here's a way to speed up your code: instead of sending the Mat's bytes to JNI, you can send the Mat's native pointer address directly.
In Java:
public void processFrame(byte[] data) {
    // the Mat must be allocated with the right dimensions before put()
    Mat raw = new Mat(height + height / 2, width, CvType.CV_8UC1);
    raw.put(0, 0, data); // place the bytes into the Mat
    scanPackage(..., raw.native_obj, ...);
}
where native_obj is the address of the Mat object, which is of type long.
To convert the jlong back to a Mat in C++, change your jbyteArray input_ parameter to jlong input_:
JNIEXPORT void JNICALL Java_at_identum_planogramscanner_ScanActivity_scanPackage(..., jlong input_, ...) {
    cv::Mat *pframe_addr = (cv::Mat *) input_;
    Mat img(height, width, CV_8UC3);
    cv::cvtColor(*pframe_addr, img, CV_YUV2RGB_NV12, 3);
    /** The rest of your code */
}
I need to convert an image to grayscale and then back to RGBA to be able to draw in it.
Currently, I am doing it with two different cvtColor calls, which works fine, although the performance is not good in Android (RGBA -> GRAY -> RGBA).
Getting a gray image from the camera directly is faster and only having to do one cvtColor call makes a huge difference (GRAY -> RGBA).
The problem is that the second method makes the app close after a few seconds. Logcat in Android Studio does not show a crash for the app, but with the No Filters option selected it shows some errors. Here is the log: https://pastebin.com/jA7jFSvu. It seems to point to a problem with OpenCV's camera.
Below are the two different pieces of code.
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    // Method 1 - works
    cameraImage = inputFrame.rgba();
    native.exampleProcessImage1(cameraImage.getNativeObjAddr(), cameraImage.getNativeObjAddr());
    return cameraImage;

    // Method 2 - app closes after a few seconds
    cameraImage = inputFrame.gray();
    Mat result = new Mat();
    native.exampleProcessImage2(cameraImage.getNativeObjAddr(), result.getNativeObjAddr());
    return result;
}
And this is my code in C++:
void Java_com_example_native_exampleProcessImage1(JNIEnv *env, jobject instance, jlong sourceImage, jlong destImage) {
    // works!
    Mat &src = *((Mat *) sourceImage);
    Mat &dest = *((Mat *) destImage);
    Mat pivot;
    // src is RGBA
    cvtColor(src, pivot, COLOR_RGBA2GRAY);
    cvtColor(pivot, dest, COLOR_GRAY2RGBA);
    // dest is RGBA
    // process
}

void Java_com_example_native_exampleProcessImage2(JNIEnv *env, jobject instance, jlong sourceImage, jlong destImage) {
    // does not work
    Mat &src = *((Mat *) sourceImage);
    Mat &dest = *((Mat *) destImage);
    // src is GRAY
    cvtColor(src, dest, COLOR_GRAY2RGBA);
    // dest is RGBA
    // process
}
This works as expected on Linux with OpenCV.
Do you know what I am doing wrong? Is there another way to achieve the same result? Performance is key, in particular on Android devices.
Thank you in advance.
For the second case you have a memory leak: a new Mat is allocated for every frame and never released, which leaks on the order of 3 sec * fps * frame_resolution * 4 bytes.
I think the crash happens once memory is full.
You need to call result.release(); somewhere after each exampleProcessImage2 call.
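A minimal sketch of that idea, reusing one preallocated Mat so there is no per-frame allocation to release (assuming exampleProcessImage2 is declared as a native method on the same class; the name follows the question):

private Mat result = new Mat();  // allocated once, reused for every frame

public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat gray = inputFrame.gray();
    // hypothetical declaration: private native void exampleProcessImage2(long src, long dst);
    exampleProcessImage2(gray.getNativeObjAddr(), result.getNativeObjAddr());
    return result;  // the same buffer is simply overwritten on the next frame
}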
I'm trying to run a piece of code through OpenCV Java, then pass the Mat object to OpenCV JNI code which does Canny Edge detection on it and returns the Mat. But somehow, I'm repeatedly getting a SIGSEGV when the app launches and I'm unsure why this is:
09-23 00:30:19.501 20399-20547/com.example.opencv.opencvtest A/libc: Fatal signal 11 (SIGSEGV), code 1, fault addr 0x3 in tid 20547 (Thread-7450)
The Java code segment in question is:
@Override
public void onCameraViewStarted(int width, int height) {
    // Everything initialized
    mGray = new Mat(height, width, CvType.CV_8UC4);
    mGauss = new Mat(height, width, CvType.CV_8UC4);
    mCanny = new Mat(height, width, CvType.CV_8UC4);
}

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mGray = inputFrame.rgba();
    Imgproc.GaussianBlur(mGray, mGauss, new Size(), 5);
    // This works perfectly fine
    // Imgproc.Canny(mGauss, mCanny, 0, 20);
    // But this causes a SIGSEGV
    nativeCanny(mGauss.getNativeObjAddr(), mCanny.getNativeObjAddr());
    return mCanny;
}
The JNI code is:
extern "C" {
JNIEXPORT jboolean JNICALL
Java_com_example_opencv_opencvtest_MainActivity_nativeCanny(JNIEnv *env, jobject instance, long iAddr, long oAddr) {
cv::Mat* blur = (cv::Mat*) iAddr;
cv::Mat* canny = (cv::Mat*) oAddr;
// This line is causing the SIGSEGV because if I comment it,
// everything works (but Mat* canny is empty so shows up black screen)
Canny(*blur, *canny, 10, 30, 3 );
return true;
}
}
Any idea why this is happening? I've spent the better half of the day trying to figure out why this is breaking but made no headway other than isolating the problematic statements.
EDIT: From the comments
I think it was an error with the initialization of mCanny. If I change the JNI call to Canny(*blur, *blur, 10, 30, 3 ); and then in Java return mGauss instead of mCanny then it works fine. This fixes it for the moment, but I'm honestly still unsure why mCanny is causing the SIGSEGV.
SIGSEGV means you tried to read or write unallocated memory. The fault address is 3; something that close to 0 almost always means you dereferenced a null pointer. My guess is that either mGauss or mCanny had a 0 for its native object address.
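If so, a defensive guard in the native function turns the crash into a visible failure. A minimal sketch, assuming the usual jlong-based signature that getNativeObjAddr() implies:

extern "C" JNIEXPORT jboolean JNICALL
Java_com_example_opencv_opencvtest_MainActivity_nativeCanny(JNIEnv *env, jobject instance,
                                                            jlong iAddr, jlong oAddr) {
    if (iAddr == 0 || oAddr == 0) {
        return JNI_FALSE;  // refuse to dereference a null Mat* instead of crashing
    }
    cv::Mat *blur = reinterpret_cast<cv::Mat *>(iAddr);
    cv::Mat *canny = reinterpret_cast<cv::Mat *>(oAddr);
    cv::Canny(*blur, *canny, 10, 30, 3);
    return JNI_TRUE;
}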
I have to rotate every YUV image buffer I receive from the camera by 90 degrees counterclockwise. I found a post that does this in Java, and that code works fine.
But I've tried to write a native method with the same logic, because I wanted it to work faster.
JNIEXPORT jbyteArray JNICALL Java_com_ndk_example_utils_NativeUtils_rotateFrameBackward
  (JNIEnv *env, jobject obj, jbyteArray arr, jint w, jint h) {
    jint arrSize = w * h * 3 / 2;
    jbyte *data, *yuv;
    data = (*env)->GetByteArrayElements(env, arr, JNI_FALSE);
    yuv = (*env)->GetByteArrayElements(env, arr, JNI_FALSE);

    int x, y, i = 0;
    for (x = 0; x < w; x++) {
        for (y = h - 1; y >= 0; y--) {
            yuv[i] = data[y * w + x];
            i++;
        }
    }

    i = arrSize - 1;
    for (x = w - 1; x > 0; x = x - 2) {
        for (y = 0; y < h / 2; y++) {
            yuv[i] = data[(w * h) + (y * w) + x];
            i--;
            yuv[i] = data[(w * h) + (y * w) + (x - 1)];
            i--;
        }
    }

    (*env)->ReleaseByteArrayElements(env, arr, yuv, JNI_ABORT);
    yuv = 0;
    data = 0;
    return arr;
}
When I launched this method on my HTC 816 (v5.1) it worked fine, but when I launched the app on a Samsung S3 (v4.3) and a Lenovo P-70 (v4.4.2), the app crashed. In the Android Monitor tab in Android Studio I saw that memory usage kept increasing until the app crashed. On my HTC I don't have this problem. Any ideas?
You do a double GetByteArrayElements for arr, once into data and once into yuv, then only release yuv. You also don't check whether a copy was made: the last parameter is an out-parameter that receives that information, so you should pass a pointer to a jboolean rather than JNI_FALSE; you don't get to tell the system whether to copy.
You should therefore release both pointers in the end.
Also, if this code works, it means copies are in fact made, since you are reading and writing the same memory area, which would otherwise corrupt the image.
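A corrected sketch of the same rotation (same NV21-style layout as your code; the rotated frame goes into a separate output array so that both pointers can be released properly):

JNIEXPORT jbyteArray JNICALL Java_com_ndk_example_utils_NativeUtils_rotateFrameBackward
  (JNIEnv *env, jobject obj, jbyteArray arr, jint w, jint h) {
    jint arrSize = w * h * 3 / 2;
    jboolean isCopy;  // out-parameter: the VM reports whether it copied
    jbyte *data = (*env)->GetByteArrayElements(env, arr, &isCopy);
    jbyteArray out = (*env)->NewByteArray(env, arrSize);  // separate output array
    jbyte *yuv = (*env)->GetByteArrayElements(env, out, NULL);

    int x, y, i = 0;
    for (x = 0; x < w; x++)            // rotate the Y plane
        for (y = h - 1; y >= 0; y--)
            yuv[i++] = data[y * w + x];

    i = arrSize - 1;                   // rotate the interleaved VU plane
    for (x = w - 1; x > 0; x -= 2)
        for (y = 0; y < h / 2; y++) {
            yuv[i--] = data[(w * h) + (y * w) + x];
            yuv[i--] = data[(w * h) + (y * w) + (x - 1)];
        }

    (*env)->ReleaseByteArrayElements(env, out, yuv, 0);          // commit the output
    (*env)->ReleaseByteArrayElements(env, arr, data, JNI_ABORT); // discard the input copy
    return out;
}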
How can I display the results of a timer with putText in my OpenCV Android app? The app detects features in the view from the camera, and the main algorithm and the timer are written in C++. The full code of my C++ JNI file:
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <vector>

using namespace std;
using namespace cv;

extern "C" {
JNIEXPORT void JNICALL
Java_org_opencv_samples_tutorial3_Sample3View_FindFeatures(JNIEnv *env, jobject, jint width, jint height, jbyteArray yuv, jintArray bgra)
{
    jbyte *_yuv = env->GetByteArrayElements(yuv, 0);
    jint *_bgra = env->GetIntArrayElements(bgra, 0);
    Mat myuv(height + height / 2, width, CV_8UC1, (unsigned char *) _yuv);
    Mat mbgra(height, width, CV_8UC4, (unsigned char *) _bgra);
    Mat mgray(height, width, CV_8UC1, (unsigned char *) _yuv);
    // Please pay attention to the BGRA byte order:
    // ARGB stored in Java as an int array becomes BGRA at the native level
    cvtColor(myuv, mbgra, CV_YUV420sp2BGR, 4);
    vector<KeyPoint> v;
    OrbFeatureDetector detector(1);
    double t = (double) getTickCount();
    detector.detect(mgray, v);
    t = ((double) getTickCount() - t) / getTickFrequency();
    putText(mbgra, t + " detection time", Point2f(100, 100), FONT_HERSHEY_PLAIN, 2, Scalar(0, 0, 255, 255), 2);
    for (size_t i = 0; i < v.size(); i++)
        circle(mbgra, Point(v[i].pt.x, v[i].pt.y), 10, Scalar(0, 0, 255, 255));
    env->ReleaseIntArrayElements(bgra, _bgra, 0);
    env->ReleaseByteArrayElements(yuv, _yuv, 0);
}
}
The problem is in the line with putText: I get an error "invalid operands of types 'double' and 'char const [15]' to binary 'operator+'". Is my timer OK? How else can I display the results of it? I will be grateful for your help.
't' is a double, and the constant " detection time" is treated as a string (const char[15]). double + string is an operation the compiler doesn't understand, which is why it pukes on you.
Instead, try this approach:
std::stringstream s;
s << t;
s << " detection time";
putText(mbgra, s.str(), Point2f(100,100), FONT_HERSHEY_PLAIN, 2, Scalar(0,0,255,255), 2);
In the above code, the stringstream class has all the overloads built into the << operator, so it knows what to do with doubles, integers, and strings and how to mash them together. With a little more research into its various attributes, you can get it to format the precision of decimal points and such.
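If you prefer a one-liner, cv::format builds the string printf-style and returns something putText accepts directly; a sketch using the same variables as the question:

// e.g. draws "0.0421 detection time", formatted to 4 decimal places
putText(mbgra, cv::format("%.4f detection time", t), Point2f(100, 100),
        FONT_HERSHEY_PLAIN, 2, Scalar(0, 0, 255, 255), 2);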
I have an OpenCV Android app. Most of its code is in Java, but I have one function that is in C.
The function gets a Mat object and returns a new one.
My question is: how do I return a Mat from the native code to Java? I couldn't find any example of that.
Thanks.
Today I had to return a Mat from native code. I started with "Tutorial 2 Advanced - 2. Mix Java+Native OpenCV"; it already passes two Mat objects (images captured from the camera) to the native code.
But I wanted to return extracted features, so I added jlong addrDescriptor to the signature:
extern "C" {
JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial4_Sample4View_FindFeatures(JNIEnv* env, jobject thiz, jlong addrGray, jlong addrRgba, jlong addrDescriptor)
{
Mat* pMatGr=(Mat*)addrGray;
Mat* pMatRgb=(Mat*)addrRgba;
Mat* pMatDesc=(Mat*)addrDescriptor;
vector<KeyPoint> v;
//OrbFeatureDetector detector(50);
OrbFeatureDetector detector;
OrbDescriptorExtractor extractor;
detector.detect(*pMatGr, v);
extractor.compute( *pMatGr, v, *pMatDesc );
circle(*pMatRgb, Point(100,100), 10, Scalar(5,128,255,255));
for( size_t i = 0; i < v.size(); i++ ) {
circle(*pMatRgb, Point(v[i].pt.x, v[i].pt.y), 10, Scalar(255,128,0,255));
}
}
}
In the Java part I added the Mat:
private Mat descriptor;
descriptor = new Mat();
The method getNativeObjAddr() does the trick. The Mat is allocated in Java and its address is passed to the native code, so there isn't any explicit returning.
FindFeatures(mGraySubmat.getNativeObjAddr(), mRgba.getNativeObjAddr(), descriptor.getNativeObjAddr());
Log.i("desc:" , descriptor.dump());
The Mat is filled with the required data and is directly accessible in the Java code after the JNI invocation returns.
Somewhere else in the code the Mat is released:

if (descriptor != null)
    descriptor.release();
descriptor = null;
In C++:

jlong funC() {
    Mat *mat = new Mat();
    //...
    return (jlong) mat;
}

In Java:

long addr = funC(); // addr is returned from the C++ method funC()
Mat mat = new Mat(addr);

Attention: you must allocate the Mat with new Mat() in C++. If you write Mat mat; instead, the object is destroyed when funC() ends and the returned address becomes dangling.
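A minimal sketch of the full round trip under that caveat (funC here is the hypothetical native method from the snippet above):

// Java side
private static native long funC();   // returns (jlong) new Mat(...)

void useNativeMat() {
    long addr = funC();       // address of the heap-allocated native Mat
    Mat mat = new Mat(addr);  // wraps the existing native object, no copy
    // ... use mat ...
    mat.release();            // free the pixel data when done; the Mat header
                              // itself is deleted by the Java wrapper's finalizer
}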