<unknown> Error occurred in detectAndCompute() - android

When I ran my code under a debugger, it reported an <unknown> error in:
detector->detectAndCompute( matInput, noArray(), keypoints2, descriptors2 );
`matInput` comes from `inputFrame.rgba()` in the Java callback:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame)
and it is not null.
This is part of native-lib.cpp:
std::vector<Mat> trainImages;
std::vector< std::vector<DMatch> > knn_matches;
std::vector<KeyPoint> keypoints1;
std::vector<KeyPoint> keypoints2;
Mat matImg;

static void createKeypointsAndDescriptors(const Mat& matInput) {
    int minHessian = 400;
    cv::Ptr<SURF> detector = SURF::create( minHessian );
    std::vector<KeyPoint> temp_keypoints;
    std::vector<std::vector<KeyPoint>> temp_keypoints1;
    Mat temp_descriptors;
    std::vector<Mat> temp_descriptors1;
    Mat descriptors2;
    std::vector< std::vector<DMatch> > temp_knn_matches;

    for(int i = 0; i < 2; i++) {
        detector->detectAndCompute( trainImages[i], noArray(), temp_keypoints, temp_descriptors );
        temp_keypoints1.push_back(temp_keypoints);
        // clone() so each stored descriptor matrix owns its data; the output
        // Mat may be reused by the next detectAndCompute() call
        temp_descriptors1.push_back(temp_descriptors.clone());
    }
    detector->detectAndCompute( matInput, noArray(), keypoints2, descriptors2 );

    Ptr<DescriptorMatcher> matcher = DescriptorMatcher::create(DescriptorMatcher::FLANNBASED);
    int max = 0;
    for(int i = 0; i < 2; i++) {
        // was: temp_keypoints[i].size — that indexes a single KeyPoint,
        // not the i-th keypoint list
        if(temp_keypoints1[i].size() >= 2 && keypoints2.size() >= 2)
            matcher->knnMatch( temp_descriptors1[i], descriptors2, temp_knn_matches, 2 );
        if(max < (int)temp_knn_matches.size()) {
            max = (int)temp_knn_matches.size();
            keypoints1 = temp_keypoints1[i];
            matImg = trainImages[i];
            knn_matches = temp_knn_matches;
        }
    }
}
EDIT
Here's my example input image (screenshot omitted). I send images from the asset directory to JNI from Java; this is the native function that receives them:
extern "C"
JNIEXPORT void JNICALL
Java_com_example_surfwithflann2_MainActivity_sendImages(JNIEnv *env, jobject instance,
jlongArray tempAddrObj_) {
if(trainImages.size() == 0) {
int length = env->GetArrayLength(tempAddrObj_);
jlong *tempAddrObj = env->GetLongArrayElements(tempAddrObj_, NULL);
for(int i = 0; i < length; i++) {
cv::Mat &tempImage = *(cv::Mat *)tempAddrObj[i];
trainImages.push_back(tempImage);
}
env->ReleaseLongArrayElements(tempAddrObj_, tempAddrObj, 0);
} else {
__android_log_print(ANDROID_LOG_DEBUG, "TAG", "already received from java");
}
}
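Two things are worth checking before the SURF calls (hedged guesses, not a confirmed diagnosis): `inputFrame.rgba()` yields a 4-channel RGBA Mat while SURF is normally fed a single-channel 8-bit image, and createKeypointsAndDescriptors() indexes trainImages[0] and trainImages[1] without verifying that sendImages() has already run. A minimal guard sketch:

static void createKeypointsAndDescriptors(const cv::Mat& matInput) {
    CV_Assert(!matInput.empty());
    if (trainImages.size() < 2)
        return; // sendImages() has not delivered the training images yet
    // SURF is usually run on grayscale input; convert the RGBA frame first
    cv::Mat gray;
    if (matInput.channels() > 1)
        cv::cvtColor(matInput, gray, cv::COLOR_RGBA2GRAY);
    else
        gray = matInput;
    // ... run detectAndCompute on gray instead of matInput ...
}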

To narrow it down, try splitting detectAndCompute() into detect() and compute():
#include <stdio.h>
#include <iostream>
#include "opencv2/core.hpp"
#include "opencv2/features2d.hpp"
#include "opencv2/xfeatures2d.hpp"
#include "opencv2/highgui.hpp"

using namespace cv;
using namespace cv::xfeatures2d;

std::vector<cv::KeyPoint> keypoints2;
cv::Mat descriptors2;

static void createKeypointsAndDescriptors(const cv::Mat& matInput) {
    //-- Step 1: Detect the keypoints using SURF Detector, compute the descriptors
    double minHessian = 400;
    Ptr<SURF> detector = SURF::create();
    detector->setHessianThreshold(minHessian);
    detector->detect(matInput, keypoints2);
    detector->compute(matInput, keypoints2, descriptors2);
    // detector->detectAndCompute( matInput, noArray(), keypoints2, descriptors2 );
    ...
}
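If the failure happens inside OpenCV, wrapping the calls in a try/catch for cv::Exception usually surfaces the underlying assertion text in logcat, which is far more informative than the debugger's <unknown>. A small sketch (not part of the original answer; the "native-lib" tag is arbitrary):

// needs #include <android/log.h>
try {
    detector->detect(matInput, keypoints2);
    detector->compute(matInput, keypoints2, descriptors2);
} catch (const cv::Exception& e) {
    // e.what() carries the failed CV_Assert condition and source location
    __android_log_print(ANDROID_LOG_ERROR, "native-lib", "OpenCV error: %s", e.what());
}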
Links:
- cv::xfeatures2d::SURF Class Reference
- cv::Feature2D Class Reference
- cv::xfeatures2d::AffineFeature2D Class Reference

Related

Place image on rectangle after detection in OpenCV

I'm currently developing an Android application where the user can choose a shirt they want, and it will be placed on top of their body using the rear camera. The detection part is done and working, and below is the code I've written so far.
nerds_thesis_clartips_OpencvClass.h //header file for nerds_thesis_clartips_OpencvClass.cpp
#include <jni.h>
#include <opencv2/opencv.hpp>
#include <iostream>
#include <vector>
/* Header for class nerds_thesis_clartips_OpencvClass */

using namespace cv;
using namespace std;

#ifndef _Included_nerds_thesis_clartips_OpencvClass
#define _Included_nerds_thesis_clartips_OpencvClass
#ifdef __cplusplus
extern "C" {
#endif
/*
 * Class:     nerds_thesis_clartips_OpencvClass
 * Method:    humanDetection
 * Signature: (J)V
 */
void detectHuman(Mat& frame);
JNIEXPORT void JNICALL Java_nerds_thesis_clartips_OpencvClass_humanDetection
  (JNIEnv *, jclass, jlong);
#ifdef __cplusplus
}
#endif
#endif
nerds_thesis_clartips_OpencvClass.cpp //C++ File
#include "nerds_thesis_clartips_OpencvClass.h"
JNIEXPORT void JNICALL Java_nerds_thesis_clartips_OpencvClass_humanDetection (JNIEnv *, jclass, jlong addrRgba){
Mat& frame = *(Mat*)addrRgba;
detectHuman(frame);
}
void detectHuman(Mat& frame){
// assign xml file to a variable
String human_cascade_name = "/storage/emulated/0/data/haarcascade_upperbody.xml";
CascadeClassifier human_cascade;
// load xml file
if(!human_cascade.load( human_cascade_name ) ) { printf("--(!)Error loading\n"); return; };
std::vector<Rect> humans;
Mat frame_gray;
Mat original;
frame.copyTo(original);
//convert input to grayscale
cvtColor( frame, frame_gray, CV_BGR2GRAY );
//increase image contrast
equalizeHist( frame_gray, frame_gray);
//Detect Human
human_cascade.detectMultiScale( frame_gray, humans, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(100, 100) );
//for (int i=0; i<humans.size(); i++)
//rectangle(frame, Point(humans[i].x, humans[i].y), Point(humans[i].x+humans[i].width, humans[i].y+humans[i].height), Scalar(0,255,0));
Mat imageMask = imread("C:/Users/Requinala/AndroidStudioProjects/CLARTIPS/app/src/main/res/drawable/bluevelvet.png", 1);
// Draw the mask over all rectangles
for (size_t i = 0; i < humans.size(); i++){
Rect r = humans[i];
Mat humanROI = frame_gray( humans[i] ); //image of the upper body
int h_temp = humans[i].height; // storing original height
int x = humans[i].x;
int y = humans[i].y - h_temp*(-0.6); // y is increased by 0.6*h
int w = humans[i].width;
int h = h_temp; // height detected
rectangle(frame,Point (x,y),Point(x + w,y +h),Scalar(255,0,255),1,4,0);
/*int xx =0, yy =0;
// Just iterate in face region pixel by pixel
for(int x = humans[i].x; x < humans[i].x+humans[i].width; x++){
for (int y = humans[i].y; y < humans[i].y+humans[i].height; y++){
//Copy Mask to Original image If the 0 chan
//Proper condition is over all color channels
//if (imageMask.at(xx,yy)[0] < 10){
// Copy to original image on (y,x) places the pixel of xx,yy mask
humanROI.at(y,x)[0] = imageMask.at(xx,yy)[0];
humanROI.at(y,x)[1] = imageMask.at(xx,yy)[1];
humanROI.at(y,x)[2] = imageMask.at(xx,yy)[2];
//}
// Iterate in mask x
xx =xx+1;
}
// iterate next row of imageMask
xx = 0;
yy =yy+1;
}*/
}
}
However, I'm getting this error:
no matching member function for call to 'at'
What should I do? Any help/ideas will be appreciated.
By looking at your code:
//if (imageMask.at(xx,yy)[0] < 10){
    // Copy to original image on (y,x) places the pixel of xx,yy mask
    humanROI.at(y,x)[0] = imageMask.at(xx,yy)[0];
    humanROI.at(y,x)[1] = imageMask.at(xx,yy)[1];
    humanROI.at(y,x)[2] = imageMask.at(xx,yy)[2];
//}
it is easy to spot the problem: you are missing the template specification. Since you are addressing individual channels of 3-channel 8-bit pixels, the element type is Vec3b, so you can change it to the following:
//if (imageMask.at<Vec3b>(xx,yy)[0] < 10){
    // Copy to original image on (y,x) places the pixel of xx,yy mask
    humanROI.at<Vec3b>(y,x)[0] = imageMask.at<Vec3b>(xx,yy)[0];
    humanROI.at<Vec3b>(y,x)[1] = imageMask.at<Vec3b>(xx,yy)[1];
    humanROI.at<Vec3b>(y,x)[2] = imageMask.at<Vec3b>(xx,yy)[2];
//}
or, even better, copy the whole pixel at once:
//if (imageMask.at<Vec3b>(xx,yy)[0] < 10){
    // Copy to original image on (y,x) places the pixel of xx,yy mask
    humanROI.at<Vec3b>(y,x) = imageMask.at<Vec3b>(xx,yy);
//}
That should solve your compile error. However, humanROI is taken from the greyscale image, so it has only one channel; you can do:
Mat humanColorROI;
cvtColor( humanROI, humanColorROI, CV_GRAY2BGR );
to get a 3-channel grey image. This way it should work.
P.S.: Regarding the detection stopping after it finds the body: it could be that it crashes, or that you are only calling this function once. It is better to ask that as a separate question, so that you receive help from more members and others can find the answer more quickly.
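Putting the pieces together, a hedged sketch of what the copy loop could look like on a colour ROI, assuming frame is a 3-channel BGR image and imageMask was loaded in colour so that both are CV_8UC3:

// Take the ROI from the colour frame, not from frame_gray,
// so that all three channels actually exist
Mat colorROI = frame(humans[i]);
for (int y = 0; y < colorROI.rows && y < imageMask.rows; y++)
    for (int x = 0; x < colorROI.cols && x < imageMask.cols; x++)
        colorROI.at<Vec3b>(y, x) = imageMask.at<Vec3b>(y, x);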

Deserializing shape_predictor_68_face_landmarks.dat only once for facial landmark detection in real time in Android Studio

I am trying to detect facial landmarks in Android Studio using OpenCV and the dlib library.
I can capture an image and detect landmarks in it.
But I am facing a problem when I try to detect facial landmarks in real time,
because I am deserializing the shape_predictor_68_face_landmarks.dat file for every frame.
onCameraFrame method of MainActivity.java:
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame)
{
    matInput = inputFrame.rgba();
    NativeClass.LandmarkDetection(matInput.getNativeObjAddr(), matOutput.getNativeObjAddr());
    return matOutput;
}
NativeClass.java:
package com.example.user.drowsinessdetection;

public class NativeClass {
    public native static String getMessage();
    public native static void LandmarkDetection(long addrInput, long addrOutput);
}
The cpp file for Landmark detection:
com_example_user_drowsinessdetection_NativeClass.cpp
#include <com_example_user_drowsinessdetection_NativeClass.h>
#include <dlib/geometry/rectangle.h>

JNIEXPORT jstring JNICALL
Java_com_example_user_drowsinessdetection_NativeClass_getMessage
        (JNIEnv *env, jclass){
    return env->NewStringUTF("JNI message"); // the return keyword was missing
}
JNIEXPORT void JNICALL
Java_com_example_user_drowsinessdetection_NativeClass_LandmarkDetection
        (JNIEnv *env, jclass thiz, jlong addrInput, jlong addrOutput){
    Mat& image = *(Mat*)addrInput;
    Mat& dst = *(Mat*)addrOutput;
    faceDetectionDlib(image, dst);
}
void faceDetectionDlib(Mat& img, Mat& dst){
    try {
        frontal_face_detector detector = get_frontal_face_detector();
        shape_predictor pose_model;
        deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat") >> pose_model;
        cv_image<bgr_pixel> cimg(img);
        std::vector<dlib::rectangle> faces = detector(cimg);
        std::vector<full_object_detection> shapes;
        size_t k = faces.size();
        for(unsigned long i = 0; i < k; ++i)
            shapes.push_back(pose_model(cimg, faces[i]));
        dst = img.clone();
        renderToMat(shapes, dst);
    }
    catch (serialization_error& e)
    {
        cout << endl << e.what() << endl;
    }
}
void renderToMat(std::vector<full_object_detection>& dets, Mat& dst){
    Scalar color;
    int sz = 3, l;
    color = Scalar(0, 255, 0);
    //chin line
    l = dets.size();
    for(unsigned long idx = 0; idx < l; idx++) {
        //left eye
        for (unsigned long i = 37; i <= 41; ++i)
            cv::line(dst, Point(dets[idx].part(i).x(), dets[idx].part(i).y()),
                     Point(dets[idx].part(i - 1).x(), dets[idx].part(i - 1).y()), color, sz);
        cv::line(dst, Point(dets[idx].part(36).x(), dets[idx].part(36).y()),
                 Point(dets[idx].part(41).x(), dets[idx].part(41).y()), color, sz);
        //right eye
        for (unsigned long i = 43; i <= 47; ++i)
            cv::line(dst, Point(dets[idx].part(i).x(), dets[idx].part(i).y()),
                     Point(dets[idx].part(i - 1).x(), dets[idx].part(i - 1).y()), color, sz);
        cv::line(dst, Point(dets[idx].part(42).x(), dets[idx].part(42).y()),
                 Point(dets[idx].part(47).x(), dets[idx].part(47).y()), color, sz);
        //lips out part
    }
}
I don't want to execute this line:
frontal_face_detector detector = get_frontal_face_detector();
or this one:
deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat") >> pose_model;
for each and every frame. How can I run them only once and then reuse them for all subsequent frames?
I am a newbie. Please help.
Sorry for my poor English. :)
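One common pattern, sketched here as a suggestion rather than tested code: keep the detector and the shape_predictor in function-local statics, which C++11 initialises exactly once (and thread-safely), so the .dat file is deserialized only on the first frame:

static frontal_face_detector& getDetector() {
    // constructed on first use, then reused for every later frame
    static frontal_face_detector detector = get_frontal_face_detector();
    return detector;
}
static shape_predictor& getPoseModel() {
    // the lambda body runs once; later calls return the cached model
    static shape_predictor pose_model = [] {
        shape_predictor p;
        deserialize("storage/emulated/0/shape_predictor_68_face_landmarks.dat") >> p;
        return p;
    }();
    return pose_model;
}
void faceDetectionDlib(Mat& img, Mat& dst) {
    frontal_face_detector& detector = getDetector();
    shape_predictor& pose_model = getPoseModel();
    // ... per-frame detection and rendering exactly as before ...
}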

How to measure the time of each process in JNI?

I want to log the time that my JNI C++ system needs for every process. This is my code:
/*
 * ImageProcessing.cpp
 */
#include <jni.h>
#include <stdlib.h>
#include <stdio.h>
#include <string>
#include <android/log.h>
#include <strstream>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc_c.h>
#include <opencv2/objdetect/objdetect.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <vector>

#define LOG_TAG "TourGuide"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))

using namespace std;
using namespace cv;

std::vector<float> parse_delimeted_list_of_numbers(char* line, char delimeter)
{
    std::vector<float> vector_of_numbers;
    std::istrstream input_stream(line);
    std::string text;
    float number;
    while (std::getline(input_stream, text, delimeter)) {
        sscanf(text.c_str(), "%f", &number); // extra text.size() argument removed: %f takes only the target pointer
        vector_of_numbers.push_back(number);
    }
    return vector_of_numbers;
}
extern "C"
jboolean
Java_com_example_franksyesipangkar_tourguide_CameraPreview_ImageProcessing
(JNIEnv* env, jobject thiz, jint width, jint height, jbyteArray NV21FrameData, jintArray outPixels, jbyteArray b)
{
LOGD("JNIEnv");
//convert jbyteArray to char
jbyte *cmd = env->GetByteArrayElements(b, 0);
LOGD("JNIEnvFeature");
char feature[90600];//[819000] sejumlah 800kb untuk ukuran file
memset(feature,0, sizeof(feature));
memcpy(feature, cmd, strlen((char*)cmd));
LOGD("OutFeature: %s", feature);
//LOGD("OutCMD: %s", cmd);
vector<float> vectorHOGSVM;
vectorHOGSVM = parse_delimeted_list_of_numbers(feature, ' ');
LOGD("Parsing Vector Success ");
/* Mengambil data pointer */
jbyte * pNV21FrameData = env->GetByteArrayElements(NV21FrameData, 0);
jint * poutPixels = env->GetIntArrayElements(outPixels, 0);
/* Membuat matrix dari input frame gambar */
Mat mGray(height, width, CV_8UC1, (unsigned char *)pNV21FrameData);
Mat mResult(height, width, CV_8UC4, (unsigned char *)poutPixels);
/* Mengubah matrix kembali menjadi frame gambar */
IplImage GrayImg = mGray;
IplImage ResultImg = mResult;
HOGDescriptor hog;
//hog.winSize = Size(56,40);
// Set our custom detecting vector
hog.setSVMDetector(vectorHOGSVM);
//hog.setSVMDetector(HOGDescriptor::getDefaultPeopleDetector());
/* Deklarasi variable vector untuk menggambar kotak deteksi */
vector<Rect> found, found_filtered;
size_t i, j;
hog.detectMultiScale(mGray, found, 0, Size(8,8), Size(32,32), 1.05, 2);
double t = (double)getTickCount();
t = (double)getTickCount() - t;
LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
LOGD("Animal: %d", found.size());
for( i = 0; i < found.size(); i++ )
{
Rect r = found[i];
for( j = 0; j < found.size(); j++ )
if( j != i && (r & found[j]) == r)
break;
if( j == found.size() )
found_filtered.push_back(r);
}
if(found.size()) {
Rect r = found[0];
r.x += cvRound(r.width*0.1);
r.width = cvRound(r.width*0.8);
r.y += cvRound(r.height*0.07);
r.height = cvRound(r.height*0.8);
LOGD("c : %d, r : %d",r.height,r.width);
cvCvtColor(&GrayImg, &ResultImg, CV_GRAY2BGR);
env->ReleaseByteArrayElements(NV21FrameData, pNV21FrameData, 0);
env->ReleaseIntArrayElements(outPixels, poutPixels, 0);
env->ReleaseByteArrayElements(b, cmd, 0);
return true;
}
For the detection step, I log the time with cv::getTickCount() and cv::getTickFrequency() (OpenCV functions), like this:
double t = (double)getTickCount();
t = (double)getTickCount() - t;
LOGD("Detection Time: %gms", t*1000./cv::getTickFrequency());
But I don't understand how to log the time of the other steps. I want to log the time of every step, such as the LOGD("OutFeature: %s", feature); step and the LOGD("Parsing Vector Success "); step. Do you have any ideas?
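One way to do it, as a minimal sketch built on the same cv::getTickCount()/cv::getTickFrequency() pair used above (the ScopedTimer name and the labels are mine, not from the original code): an RAII helper that logs, through the existing LOGD macro, the time spent in whatever scope it lives in.

struct ScopedTimer {
    const char* label;
    double start;
    explicit ScopedTimer(const char* l) : label(l), start((double)cv::getTickCount()) {}
    ~ScopedTimer() {
        // logs elapsed milliseconds when the scope ends
        double ms = ((double)cv::getTickCount() - start) * 1000. / cv::getTickFrequency();
        LOGD("%s: %gms", label, ms);
    }
};
// Usage: wrap each step you want to measure in its own scope, e.g.
// {
//     ScopedTimer timer("Parsing vector");
//     vectorHOGSVM = parse_delimeted_list_of_numbers(feature, ' ');
// }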

Leaf Disease Detection and Recognition using OpenCV

Can you help me with this?
I was tasked to create an application using OpenCV and C++ that takes an image of a plant leaf as input. The application should detect possible symptoms of disease, like black/grey/brown spots, blights, lesions and so on; each characteristic, such as the colour of the spots, represents a different disease. After detecting the possible symptoms, the application will match them against a collection of template images in the application's database and output the best match.
What methods do I have to use for this? I've researched histogram matching and keypoint/descriptor matching, but I'm not sure which one will work best.
I found sample code using SURF and FLANN, but I don't know if it would be enough:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"

using namespace cv;

void readme();

/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
    if( argc != 3 )
    { readme(); return -1; }
    Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
    Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
    if( !img_1.data || !img_2.data )
    { std::cout << " --(!) Error reading images " << std::endl; return -1; }

    //-- Step 1: Detect the keypoints using SURF Detector
    int minHessian = 400;
    SurfFeatureDetector detector( minHessian );
    std::vector<KeyPoint> keypoints_1, keypoints_2;
    detector.detect( img_1, keypoints_1 );
    detector.detect( img_2, keypoints_2 );

    //-- Step 2: Calculate descriptors (feature vectors)
    SurfDescriptorExtractor extractor;
    Mat descriptors_1, descriptors_2;
    extractor.compute( img_1, keypoints_1, descriptors_1 );
    extractor.compute( img_2, keypoints_2, descriptors_2 );

    //-- Step 3: Matching descriptor vectors using FLANN matcher
    FlannBasedMatcher matcher;
    std::vector< DMatch > matches;
    matcher.match( descriptors_1, descriptors_2, matches );

    double max_dist = 0; double min_dist = 100;
    //-- Quick calculation of max and min distances between keypoints
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        double dist = matches[i].distance;
        if( dist < min_dist ) min_dist = dist;
        if( dist > max_dist ) max_dist = dist;
    }
    printf("-- Max dist : %f \n", max_dist );
    printf("-- Min dist : %f \n", min_dist );

    //-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
    //-- or a small arbitrary value ( 0.02 ) in the event that min_dist is very
    //-- small)
    //-- PS.- radiusMatch can also be used here.
    std::vector< DMatch > good_matches;
    for( int i = 0; i < descriptors_1.rows; i++ )
    {
        if( matches[i].distance <= max(2*min_dist, 0.02) )
        { good_matches.push_back( matches[i] ); }
    }

    //-- Draw only "good" matches
    Mat img_matches;
    drawMatches( img_1, keypoints_1, img_2, keypoints_2,
                 good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
                 vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
    //-- Show detected matches
    imshow( "Good Matches", img_matches );
    for( int i = 0; i < (int)good_matches.size(); i++ )
    { printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
    waitKey(0);
    return 0;
}

/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
Here are my questions:
1. What method do I have to use? Histogram matching, keypoint/descriptor matching, or something else?
2. If I use keypoint/descriptor matching, which algorithm is the best alternative to SURF and FLANN, given that I will ALSO be implementing it on an Android platform? Do I still have to perform thresholding or segmentation? Won't that remove important details such as colour or shape? Please suggest some steps for doing this.
I think this approach should give you good results:
Training process:
1. Extract LBP descriptors for every pixel of the image (they can be computed for colour images too).
2. Compute a histogram of LBP descriptors for each training sample.
3. Train a classifier using the histograms as inputs and the labels as outputs.
Prediction process:
1. Extract LBP descriptors for every pixel of the new image.
2. Compute the histogram of LBP descriptors for this image.
3. Feed the histogram to the classifier -> get results.
I have successfully used a feed-forward neural network as the classifier to solve a similar problem.
You may find this book useful: ISBN 978-0-85729-747-1, "Computer Vision Using Local Binary Patterns".
Try this (it computes LBP descriptors; there is also a function for computing their histogram):
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"
#include <limits>

using namespace cv;

class myLBP
{
public:
    uchar lut[256];
    uchar null;
    int radius;
    int maxTransitions;
    bool rotationInvariant;

    myLBP(int _radius=1, int _maxTransitions=8, bool _rotationInvariant=false)
    {
        radius = _radius;
        maxTransitions = _maxTransitions;
        rotationInvariant = _rotationInvariant;

        bool set[256];
        uchar uid = 0;
        for (int i=0; i<256; i++)
        {
            if (numTransitions(i) <= maxTransitions)
            {
                int id;
                if (rotationInvariant)
                {
                    int rie = rotationInvariantEquivalent(i);
                    if (i == rie)
                    {
                        id = uid++;
                    }
                    else
                    {
                        id = lut[rie];
                    }
                }
                else
                {
                    id = uid++;
                }
                lut[i] = id;
                set[i] = true;
            }
            else
            {
                set[i] = false;
            }
        }
        null = uid;
        for (int i=0; i<256; i++)
            if (!set[i])
            {
                lut[i] = null; // Set to null id
            }
    }

    /* Returns the number of 0->1 or 1->0 transitions in i */
    static int numTransitions(int i)
    {
        int transitions = 0;
        int curParity = i%2;
        for (int j=1; j<=8; j++)
        {
            int parity = (i>>(j%8)) % 2;
            if (parity != curParity)
            {
                transitions++;
            }
            curParity = parity;
        }
        return transitions;
    }

    static int rotationInvariantEquivalent(int i)
    {
        int min = std::numeric_limits<int>::max();
        for (int j=0; j<8; j++)
        {
            bool parity = i % 2;
            i = i >> 1;
            if (parity)
            {
                i += 128;
            }
            min = std::min(min, i);
        }
        return min;
    }

    void process(const Mat &src, Mat &dst) const
    {
        Mat m;
        src.convertTo(m, CV_32F);
        assert(m.isContinuous() && (m.channels() == 1));
        Mat n(m.rows, m.cols, CV_8UC1);
        n = null; // Initialize to NULL LBP pattern
        const float *p = (const float*)m.ptr();
        for (int r=radius; r<m.rows-radius; r++)
        {
            for (int c=radius; c<m.cols-radius; c++)
            {
                const float cval = (p[(r+0*radius)*m.cols+c+0*radius]);
                n.at<uchar>(r, c) = lut[(p[(r-1*radius)*m.cols+c-1*radius] >= cval ? 128 : 0) |
                                        (p[(r-1*radius)*m.cols+c+0*radius] >= cval ? 64 : 0) |
                                        (p[(r-1*radius)*m.cols+c+1*radius] >= cval ? 32 : 0) |
                                        (p[(r+0*radius)*m.cols+c+1*radius] >= cval ? 16 : 0) |
                                        (p[(r+1*radius)*m.cols+c+1*radius] >= cval ? 8 : 0) |
                                        (p[(r+1*radius)*m.cols+c+0*radius] >= cval ? 4 : 0) |
                                        (p[(r+1*radius)*m.cols+c-1*radius] >= cval ? 2 : 0) |
                                        (p[(r+0*radius)*m.cols+c-1*radius] >= cval ? 1 : 0)];
            }
        }
        dst = n.clone();
    }

    /* Returns the number of 1 bits in i */
    static int bitCount(int i)
    {
        int count = 0;
        for (int j=0; j<8; j++)
        {
            count += (i>>j)%2;
        }
        return count;
    }

    void draw(const Mat &src, Mat &dst) const
    {
        static Mat hueLUT, saturationLUT, valueLUT;
        if (!hueLUT.data)
        {
            const int NUM_COLORS = 10;
            hueLUT.create(1, 256, CV_8UC1);
            hueLUT.setTo(0);
            uchar uid = 0;
            for (int i=0; i<256; i++)
            {
                const int transitions = numTransitions(i);
                int u2;
                if (transitions <= 2)
                {
                    u2 = uid++;
                }
                else
                {
                    u2 = 58;
                }
                // Assign hue based on bit count
                int color = bitCount(i);
                if (transitions > 2)
                {
                    color = NUM_COLORS-1;
                }
                hueLUT.at<uchar>(0, u2) = 255.0*(float)color/(float)NUM_COLORS;
            }
            saturationLUT.create(1, 256, CV_8UC1);
            saturationLUT.setTo(255);
            valueLUT.create(1, 256, CV_8UC1);
            valueLUT.setTo(255.0*(3.0/4.0));
        }
        if (src.type() != CV_8UC1)
        {
            std::cout << "Expected 8UC1 source type.";
        }
        Mat hue, saturation, value;
        LUT(src, hueLUT, hue);
        LUT(src, saturationLUT, saturation);
        LUT(src, valueLUT, value);
        std::vector<Mat> mv;
        mv.push_back(hue);
        mv.push_back(saturation);
        mv.push_back(value);
        Mat coloredU2;
        merge(mv, coloredU2);
        cvtColor(coloredU2, dst, cv::COLOR_HSV2BGR);
    }
};

void Hist(const Mat &src, Mat &dst, float max=256, float min=0, int dims=-1)
{
    std::vector<Mat> mv;
    split(src, mv);
    Mat m(mv.size(), dims, CV_32FC1);
    for (size_t i=0; i<mv.size(); i++)
    {
        int channels[] = {0};
        int histSize[] = {dims};
        float range[] = {min, max};
        const float* ranges[] = {range};
        Mat hist, chan = mv[i];
        // calcHist requires F or U, might as well convert just in case
        if (mv[i].depth() != CV_8U && mv[i].depth() != CV_32F)
        {
            mv[i].convertTo(chan, CV_32F);
        }
        calcHist(&chan, 1, channels, Mat(), hist, 1, histSize, ranges);
        memcpy(m.ptr(i), hist.ptr(), dims * sizeof(float));
    }
    dst = m.clone();
}

int main(int argc, char* argv[])
{
    cv::initModule_nonfree();
    cv::namedWindow("result");
    cv::Mat bgr_img = cv::imread("D:\\ImagesForTest\\lena.jpg");
    if (bgr_img.empty())
    {
        exit(EXIT_FAILURE);
    }
    cv::Mat gray_img;
    cv::cvtColor(bgr_img, gray_img, cv::COLOR_BGR2GRAY);
    cv::normalize(gray_img, gray_img, 0, 255, cv::NORM_MINMAX);
    myLBP lbp(1, 2);
    Mat lbp_img;
    lbp.process(gray_img, lbp_img);
    lbp.draw(lbp_img, bgr_img);
    //for(int i=0; i<lbp_img.rows; ++i)
    imshow("result", bgr_img);
    cv::waitKey();
    return 0;
}
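To connect this code back to the prediction recipe above, a hedged sketch of the final step: compute the LBP image, take its histogram with Hist(), and compare it against a histogram stored at training time. Nearest-neighbour matching with compareHist stands in here for the neural-network classifier mentioned earlier, and trainedHist is a hypothetical histogram saved during training:

// 59 bins: 58 uniform LBP codes plus the null id produced by myLBP(1, 2)
Mat queryHist;
Hist(lbp_img, queryHist, 59, 0, 59);
double d = compareHist(queryHist, trainedHist, CV_COMP_CHISQR);
// a smaller chi-square distance means a more similar texture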

Accessing OpenCV Mat cell values from a native C++ function in an Android app

:)
I'm programming an algorithm to detect the radial symmetry centre of an image, in order to detect eye positions within a face frame picture. I know a public-domain project already exists that does this, but I want to base my work on a different line of research.
This is the scenario:
Doing this manipulation by hand, frame by frame, I have seen that writing the code in the Java layer, like this:
private Mat mGray = new Mat(height, width, CvType.CV_8U);
private Mat mOut = new Mat(height, width, CvType.CV_8U);
private Mat mIntermediateMat = Mat.zeros(height, width, CvType.CV_32F);

[...common methods of opencv app...]

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    switch (ImageManipulationsActivity.viewMode) {
    case ImageManipulationsActivity.VIEW_MODE_RGBA:
        mOut = inputFrame.rgba();
        break;
    case ImageManipulationsActivity.VIEW_MODE_MODIFY:
        mGray = inputFrame.gray();
        int h = mGray.rows();
        int w = mGray.cols();
        int sobxVal, sobyVal;
        /**
         * Manually applying Sobel filtering to calculate the dx and dy
         * images; additionally computing the magnitude matrix and the
         * cosValue and sinValue matrices, using LUT techniques.
         */
        for (int i = 1; i < h-1; i++)
            for (int j = 1; j < w-1; j++) {
                sobxVal = (int) (
                        ((int)mGray.get(i-1,j)[0] << 1) +
                        mGray.get(i-1,j-1)[0] +
                        mGray.get(i-1,j+1)[0] - (
                        ((int)mGray.get(i+1,j)[0] << 1) +
                        mGray.get(i+1,j-1)[0] +
                        mGray.get(i+1,j+1)[0] ) );
                sobyVal = (int) (
                        ((int)mGray.get(i,j-1)[0] << 1) +
                        mGray.get(i-1,j-1)[0] +
                        mGray.get(i+1,j-1)[0] - (
                        ((int)mGray.get(i,j+1)[0] << 1) +
                        mGray.get(i-1,j+1)[0] +
                        mGray.get(i+1,j+1)[0] ) );
                // compute magnitude and atan2
            }
        // ...other calculations...
        Core.convertScaleAbs(mIntermediateMat, mOut);
    }
    return mOut;
}
is not at all efficient! So I decided to write a native C++ function to manipulate the matrices instead.
Code on the C++ side:
#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <math.h>
#include <vector>
#include <android/log.h>

#define LOG_TAG "Example Filter"
#define LOGD(...) ((void)__android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, __VA_ARGS__))
#define RADIUS 10
#define _K 9.9
#define _A 2 //radial strictness parameter, found experimentally

using namespace std;
using namespace cv;

extern "C" {

JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(
        JNIEnv* env,
        jobject,
        jlong addrGray,
        jlong addrRgba,
        jlong addrlutCosSin,
        jlong addrlutM );

JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial2_Tutorial2Activity_FindFeatures(
        JNIEnv* env,
        jobject,
        jlong addrGray,
        jlong addrOut,
        jlong addrlutCosSin,
        jlong addrlutM )
{
    Mat& mGr = *(Mat*)addrGray;
    Mat& mOut = *(Mat*)addrOut;
    Mat& lutCosSin = *(Mat*)addrlutCosSin;
    Mat& lutM = *(Mat*)addrlutM;
    int w = mGr.cols;
    int h = mGr.rows;
    double sobelxVal, sobelyVal, angle;
    Mat magnitudo(h, w, CV_32F, Scalar(0));
    Mat xMat(h, w, CV_8S, Scalar(0));
    Mat yMat(h, w, CV_8S, Scalar(0));
    Mat oMat(h, w, CV_32F, Scalar(0));
    Mat mMat(h, w, CV_32F, Scalar(0));
    /*
     * Convolves the matrix with the Sobel ky and kx kernels
     * ky = [  1  2  1 ;
     *         0  0  0 ;
     *        -1 -2 -1 ]
     *
     * kx = [  1  0 -1 ;
     *         2  0 -2 ;
     *         1  0 -1 ]
     *
     * doing a dedicated computation
     */
    for( int i = 1; i < h-1; i++ )
    {
        for (int j = 1; j < w-1; j++ )
        {
            sobelxVal = (mGr.at<int>(i-1,j) << 1) +
                    mGr.at<int>(i-1,j-1) +
                    mGr.at<int>(i-1,j+1) - (
                    (mGr.at<int>(i+1,j) << 1) +
                    mGr.at<int>(i+1,j-1) +
                    mGr.at<int>(i+1,j+1) );
            sobelyVal = (mGr.at<int>(i,j-1) << 1) +
                    mGr.at<int>(i-1,j-1) +
                    mGr.at<int>(i+1,j-1) - (
                    (mGr.at<int>(i,j+1) << 1) +
                    mGr.at<int>(i-1,j+1) +
                    mGr.at<int>(i+1,j+1) );
            magnitudo.at<double>(i,j) = lutM.at<double>((int)sobelxVal+255/4,(int)sobelxVal+255/4);
            angle = floor(atan2(sobelyVal,sobelxVal)*180/M_PI);
            xMat.at<double>(i,j) = lutCosSin.at<double>(0,angle);
            yMat.at<double>(i,j) = lutCosSin.at<double>(1,angle);
        }
    }
    // other code calculation to determine mOut matrix values
}

}
Code on the Java side:
private Mat mRgba;
private Mat mGray;
/*
 * Matrix of 360 cols and 2 rows
 * row[0] cos values
 * row[1] sin values
 */
private static Mat lutCosSin;
/*
 * Matrix 510 x 510
 * where lutM(i,j) = atan2(i,j)
 */
private static Mat lutMagnitudo;

// common methods and declarations...

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    final int viewMode = mViewMode;
    switch (viewMode) {
    case VIEW_MODE_RGBA:
        // input frame has RGBA format
        mRgba = inputFrame.rgba();
        break;
    case VIEW_MODE_FEATURES:
        // input frame has RGBA format
        mGray = inputFrame.gray();
        mRgba = Mat.zeros(mGray.rows(), mGray.cols(), mGray.type());
        FindFeatures(
                mGray.getNativeObjAddr(),
                mRgba.getNativeObjAddr(),
                lutCosSin.getNativeObjAddr(),
                lutMagnitudo.getNativeObjAddr()
        );
        //Core.convertScaleAbs(mRgba, mRgba);
        //Log.d(TAG, "Called native function :" + mRgba.submat(new Range(0,5), new Range(0,5)).toString() +
        //        "\nAngles matrix:" + mGray);
        break;
    }
    return mRgba;
}

public native void FindFeatures(long matAddrGr, long matAddrRgba, long matAddrlutCS, long matAddrlutM);
The first important issue with this snippet is in accessing the mGr cells with the "at" method this way:
mGr.at<int>(i,j)
even though I previously checked the type of mGr, the returned values aren't the actual grey-level pixels of the grey frame (I saw this in the log).
I suspect there is a linking bug between the Java-side and C++-side matrices, but I'm not sure about this.
I hope someone can help me solve this issue XD !!
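A hedged observation that may explain the wrong values: CvCameraViewFrame.gray() produces a CV_8UC1 Mat, so each element is a single unsigned byte, and Mat::at<T>() performs no conversion; reading it with at<int>() reinterprets four neighbouring bytes as one int. The same mismatch applies to the CV_32F and CV_8S matrices written with at<double>(). A minimal sketch of accessors matched to the element types declared in the native function:

int gray  = mGr.at<uchar>(i, j);        // CV_8UC1 stores uchar
float mag = magnitudo.at<float>(i, j);  // CV_32F stores float, not double
schar xv  = xMat.at<schar>(i, j);       // CV_8S stores signed bytes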
