I'm trying to track objects with optical flow on Android after a Haar cascade detection, as in the code below, and I get the error shown here. Can anyone help me with this?
E/cv::error(): OpenCV(3.4.12) Error: Assertion failed ((npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0) in virtual void cv::{anonymous}::SparsePyrLKOpticalFlowImpl::calc(cv::InputArray, cv::InputArray, cv::InputArray, cv::InputOutputArray, cv::OutputArray, cv::OutputArray), file /build/3_4_pack-android/opencv/modules/video/src/lkpyramid.cpp, line 1259
E/org.opencv.video: video::calcOpticalFlowPyrLK_15() caught cv::Exception: OpenCV(3.4.12) /build/3_4_pack-android/opencv/modules/video/src/lkpyramid.cpp:1259: error: (-215:Assertion failed) (npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0 in function 'virtual void cv::{anonymous}::SparsePyrLKOpticalFlowImpl::calc(cv::InputArray, cv::InputArray, cv::InputArray, cv::InputOutputArray, cv::OutputArray, cv::OutputArray)'
E/AndroidRuntime: FATAL EXCEPTION: Thread-2
Process: opencv.org, PID: 31380
CvException [org.opencv.core.CvException: cv::Exception: OpenCV(3.4.12) /build/3_4_pack-android/opencv/modules/video/src/lkpyramid.cpp:1259: error: (-215:Assertion failed) (npoints = prevPtsMat.checkVector(2, CV_32F, true)) >= 0 in function 'virtual void cv::{anonymous}::SparsePyrLKOpticalFlowImpl::calc(cv::InputArray, cv::InputArray, cv::InputArray, cv::InputOutputArray, cv::OutputArray, cv::OutputArray)'
]
MatOfRect cars = new MatOfRect();
if (mDetectorType == JAVA_DETECTOR) {
    if (mJavaDetector != null) {
        mJavaDetector.detectMultiScale(matGray, cars, 1.1, 2, 2,
                // TODO: objdetect.CV_HAAR_SCALE_IMAGE
                new Size(mAbsoluteCarSize, mAbsoluteCarSize), new Size());
    }
} else {
    Log.e(TAG, "Detection method is not selected!");
}
Rect[] carsArray = cars.toArray();
ArrayList<Point> CurrentCars = new ArrayList<>();
for (int i = 0; i < carsArray.length; i++) {
    Imgproc.rectangle(matRGB, carsArray[i].tl(), carsArray[i].br(),
            CAR_RECT_COLOR, 3);
    xCenter = (carsArray[i].x + carsArray[i].width + carsArray[i].x) / 2;
    yCenter = (carsArray[i].y + carsArray[i].y + carsArray[i].height) / 2;
    Point center = new Point(xCenter, yCenter);
    CurrentCars.add(center);
    Imgproc.putText(matRGB, "[" + center.x + "," + center.y + "]",
            new Point(center.x + 20, center.y + 20),
            Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255, 255));
}
if (previousPoints.empty()) {
    previousPoints.fromList(CurrentCars);
    matGray.copyTo(matPrevGray);
}
currentPoints = new MatOfPoint2f();
MatOfByte status = new MatOfByte();
MatOfFloat err = new MatOfFloat();
TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 10, 0.03);
Video.calcOpticalFlowPyrLK(matPrevGray, matGray, previousPoints, currentPoints, status, err);
byte[] StatusArr = status.toArray();
Point[] p0Arr = previousPoints.toArray();
Point[] p1Arr = currentPoints.toArray();
ArrayList<Point> good_new = new ArrayList<>();
for (int i = 0; i < StatusArr.length; i++) {
    if (StatusArr[i] == 1) {
        good_new.add(p1Arr[i]);
        Imgproc.line(matRGB, p1Arr[i], p0Arr[i], new Scalar(255, 255, 0, 0), 2);
        Imgproc.circle(matRGB, p1Arr[i], 5, new Scalar(255, 255, 0, 0), -1);
    }
}
currentPoints.copyTo(previousPoints);
matGray.copyTo(matPrevGray);
matPrevGray is empty; that's what the error is saying. The flow call runs before matPrevGray (and previousPoints) have ever been filled, which is what happens whenever no cars are detected on the first frames.
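A minimal sketch of a fix, reusing the question's variable names: skip the flow step until both the previous frame and the previous points have been filled (for example, while no cars have been detected yet).
if (matPrevGray.empty() || previousPoints.empty()) {
    // First frame, or nothing detected yet: just store state for the next frame
    previousPoints.fromList(CurrentCars);
    matGray.copyTo(matPrevGray);
} else {
    Video.calcOpticalFlowPyrLK(matPrevGray, matGray, previousPoints,
            currentPoints, status, err);
    // ... filter by status and draw tracks as in the question ...
    currentPoints.copyTo(previousPoints);
    matGray.copyTo(matPrevGray);
}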
I'm new to OpenCV on Android and trying to do a perspective transform, but I don't know how to use the getPerspectiveTransform() and warpPerspective() functions. I can detect a rectangle in an image, but I don't know how to warp it.
Here is the rectangle detection function:
Mat tempMat = new Mat();
Mat src = new Mat();
Utils.bitmapToMat(image, tempMat);
Imgproc.cvtColor(tempMat, src, Imgproc.COLOR_BGR2RGB);
Mat blurred = src.clone();
Imgproc.medianBlur(src, blurred, 9);
Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
List<Mat> blurredChannel = new ArrayList<Mat>();
blurredChannel.add(blurred);
List<Mat> gray0Channel = new ArrayList<Mat>();
gray0Channel.add(gray0);
MatOfPoint2f approxCurve = new MatOfPoint2f();
double maxArea = 0;
int maxId = -1;
for (int c = 0; c < 3; c++) {
    int[] ch = { c, 0 };
    Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
    int thresholdLevel = 1;
    for (int t = 0; t < thresholdLevel; t++) {
        if (t == 0) {
            Imgproc.Canny(gray0, gray, 50, 50, 3, true); // true ?
            Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
        } else {
            Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
                    Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
                    Imgproc.THRESH_BINARY,
                    (src.width() + src.height()) / 200, t);
        }
        Imgproc.findContours(gray, contours, new Mat(),
                Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        for (MatOfPoint contour : contours) {
            MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
            double area = Imgproc.contourArea(contour);
            approxCurve = new MatOfPoint2f();
            Imgproc.approxPolyDP(temp, approxCurve,
                    Imgproc.arcLength(temp, true) * 0.02, true);
            if (approxCurve.total() == 4 && area >= maxArea) {
                double maxCosine = 0;
                List<Point> curves = approxCurve.toList();
                for (int j = 2; j < 5; j++) {
                    double cosine = Math.abs(angle(curves.get(j % 4),
                            curves.get(j - 2), curves.get(j - 1)));
                    maxCosine = Math.max(maxCosine, cosine);
                }
                if (maxCosine < 0.45) {
                    maxArea = area;
                    maxId = contours.indexOf(contour);
                }
            }
        }
    }
}
I draw the rectangle with this statement:
if (maxId >= 0) {
    Rect rect = Imgproc.boundingRect(contours.get(maxId));
    Imgproc.rectangle(src, rect.tl(), rect.br(), new Scalar(255, 0, 0, .8), 4);
}
After that I convert the Mat to a Bitmap and show it in an ImageView.
Here is the screenshot
So my problem is warping: how can I warp the rectangle and rotate it?
And if it is possible, how can I improve the rectangle detection? Any hints?
(OpenCV Android SDK version: 3.4.1, Android Studio version: 3.0.1)
If you are looking to warp the detected contour into a rectangle:
Get the contour of the rectangle.
Find the convex hull of the contour.
Using approxPolyDP, reduce the convex hull to 4 points.
Fit lines to consecutive points (e.g., if pts is the array, the lines are fit as l1 = lineBetween(pts[0], pts[1]), l2 = lineBetween(pts[1], pts[2]), l3 = lineBetween(pts[2], pts[3]), l4 = lineBetween(pts[3], pts[0])).
Find the intersections between these lines; you'll end up with four points.
Order the points clockwise (inputCorners = TopLeft, TopRight, BottomRight, BottomLeft).
Create an output image with the needed resolution and list its corner points in the same clockwise order ((0, 0), (cols, 0), (cols, rows), (0, rows)).
Find the homography using the function
Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
Using the output homography matrix, warp the input image with the function
Imgproc.warpPerspective(image, outputMat, homography, new Size(image.cols(), image.rows()));
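Putting the steps together, a minimal sketch; the corner coordinates are placeholders and image is assumed to be the input Mat:
// Corners ordered clockwise: TL, TR, BR, BL (placeholder values, use the ones you computed)
MatOfPoint2f inputCorners = new MatOfPoint2f(
        new Point(35, 40), new Point(410, 55),
        new Point(420, 510), new Point(25, 495));
int cols = 400, rows = 560; // desired output resolution
MatOfPoint2f imageCorners = new MatOfPoint2f(
        new Point(0, 0), new Point(cols, 0),
        new Point(cols, rows), new Point(0, rows));
Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
Mat outputMat = new Mat();
Imgproc.warpPerspective(image, outputMat, homography, new Size(cols, rows));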
This is my Kotlin extension version; you can use it in your projects.
import android.graphics.Bitmap
import org.opencv.android.Utils
import org.opencv.core.CvType
import org.opencv.core.Mat
import org.opencv.core.Size
import org.opencv.imgproc.Imgproc
import org.opencv.utils.Converters
import kotlin.math.max

fun Bitmap.perspectiveTransform(srcPoints: List<org.opencv.core.Point>): Bitmap {
    val dstWidth = max(
        srcPoints[0].distanceFrom(srcPoints[1]),
        srcPoints[2].distanceFrom(srcPoints[3])
    )
    val dstHeight = max(
        srcPoints[0].distanceFrom(srcPoints[2]),
        srcPoints[1].distanceFrom(srcPoints[3])
    )
    val dstPoints: List<org.opencv.core.Point> = listOf(
        org.opencv.core.Point(0.0, 0.0),
        org.opencv.core.Point(dstWidth, 0.0),
        org.opencv.core.Point(0.0, dstHeight),
        org.opencv.core.Point(dstWidth, dstHeight)
    )
    return try {
        val srcMat = Converters.vector_Point2d_to_Mat(srcPoints)
        val dstMat = Converters.vector_Point2d_to_Mat(dstPoints)
        val perspectiveTransformation = Imgproc.getPerspectiveTransform(srcMat, dstMat)
        val inputMat = Mat(this.height, this.width, CvType.CV_8UC1)
        Utils.bitmapToMat(this, inputMat)
        val outPutMat = Mat(dstHeight.toInt(), dstWidth.toInt(), CvType.CV_8UC1)
        Imgproc.warpPerspective(
            inputMat,
            outPutMat,
            perspectiveTransformation,
            Size(dstWidth, dstHeight)
        )
        val outPut = Bitmap.createBitmap(
            dstWidth.toInt(),
            dstHeight.toInt(), Bitmap.Config.RGB_565
        )
        //Imgproc.cvtColor(outPutMat, outPutMat, Imgproc.COLOR_GRAY2BGR)
        Utils.matToBitmap(outPutMat, outPut)
        outPut
    } catch (e: Exception) {
        e.printStackTrace()
        this
    }
}
To use distanceFrom, I wrote another extension function:
import kotlin.math.pow
import kotlin.math.sqrt

fun org.opencv.core.Point.distanceFrom(srcPoint: org.opencv.core.Point): Double {
    val w1 = this.x - srcPoint.x
    val h1 = this.y - srcPoint.y
    val distance = w1.pow(2) + h1.pow(2)
    return sqrt(distance)
}
Also, in this answer the correct srcPoints indices are:
0: top left
1: top right
2: bottom left
3: bottom right
Good luck
I'm trying to transform perspective automatically; I'm following a manual from HERE.
Specifically chapter 4: Flexible perspective transform.
When I get the image to transform, I get the following error:
Error
11-04 13:34:25.759: E/cv::error()(17332): OpenCV Error: Assertion failed (count >= 0 && (depth == CV_32F || depth == CV_32S)) in double cv::arcLength(cv::InputArray, bool), file /home/maksim/workspace/android-pack/opencv/modules/imgproc/src/shapedescr.cpp, line 301
11-04 13:34:25.759: E/org.opencv.imgproc(17332): imgproc::arcLength_10() caught cv::Exception: /home/maksim/workspace/android-pack/opencv/modules/imgproc/src/shapedescr.cpp:301: error: (-215) count >= 0 && (depth == CV_32F || depth == CV_32S) in function double cv::arcLength(cv::InputArray, bool)
11-04 13:34:25.759: E/AndroidRuntime(17332): FATAL EXCEPTION: main
11-04 13:34:25.759: E/AndroidRuntime(17332): Process: com.example.alvaro.opencv, PID: 17332
11-04 13:34:25.759: E/AndroidRuntime(17332): CvException [org.opencv.core.CvException: cv::Exception: /home/maksim/workspace/android-pack/opencv/modules/imgproc/src/shapedescr.cpp:301: error: (-215) count >= 0 && (depth == CV_32F || depth == CV_32S) in function double cv::arcLength(cv::InputArray, bool)
11-04 13:34:25.759: E/AndroidRuntime(17332): ]
The code
Mat gray = new Mat();
Imgproc.cvtColor(sampledImage, gray, Imgproc.COLOR_RGB2GRAY, 0);
Imgproc.GaussianBlur(gray, gray, new Size(7, 7), 0);
Mat edgeImage = new Mat();
Imgproc.Canny(gray, edgeImage, 100, 200); //1500,300
Mat lines = new Mat();
int threshold = 100; //200
Imgproc.HoughLinesP(edgeImage, lines, 1, Math.PI / 180, threshold, 60, 10); //100,60
ArrayList<org.opencv.core.Point> corners = new ArrayList<org.opencv.core.Point>();
// Find the intersection of the four lines to get the four corners
for (int i = 0; i < lines.cols(); i++) {
    for (int j = i + 1; j < lines.cols(); j++) {
        org.opencv.core.Point intersectionPoint = getLinesIntersection(lines.get(0, i), lines.get(0, j));
        if (intersectionPoint != null) {
            Log.i(TAG, "intersectionPoint: " + intersectionPoint.x + " " + intersectionPoint.y);
            corners.add(intersectionPoint);
        }
    }
}
MatOfPoint2f approxCorners = new MatOfPoint2f();
MatOfPoint2f cornersMat = new MatOfPoint2f();
cornersMat.fromList(corners);
// ERROR LINE on "Imgproc.arcLength"
double approxDistance = Imgproc.arcLength(cornersMat, true) * 0.02;
Imgproc.approxPolyDP(cornersMat, approxCorners, approxDistance, true);
What I tried
I tried changing the image from 32 bits to 8 bits (and other depths), without results.
I tried using another method to get the edges.
I found the same issue HERE, but I really can't find the solution...
I really don't know how to solve it.
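One thing worth checking, as a guess from the assertion text: an empty Mat fails the depth test inside arcLength, and corners is empty whenever no line intersections were found. A minimal guard:
// Sketch: bail out before arcLength if no intersections were collected;
// an empty cornersMat does not carry the CV_32F data the assertion expects
if (corners.isEmpty()) {
    Log.e(TAG, "no line intersections found, cannot approximate corners");
    return;
}
MatOfPoint2f cornersMat = new MatOfPoint2f();
cornersMat.fromList(corners); // CV_32FC2, the depth arcLength accepts
double approxDistance = Imgproc.arcLength(cornersMat, true) * 0.02;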
int match_method = Imgproc.TM_CCOEFF;
int result_cols = labReport.cols() - textMat.cols() + 1;
int result_rows = labReport.rows() - textMat.rows() + 1;
Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
Imgproc.matchTemplate(labReport, textMat, result, match_method);
Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
MinMaxLocResult mmr = Core.minMaxLoc(result);
Point matchLoc;
if (match_method == Imgproc.TM_SQDIFF
        || match_method == Imgproc.TM_SQDIFF_NORMED) {
    matchLoc = mmr.minLoc;
} else {
    matchLoc = mmr.maxLoc;
}
return matchLoc;
I'm using this code to find a template in my image. However, if I use a template that I know is not in the image, Imgproc.matchTemplate still returns a point as the result.
I need something like a threshold for this: I need a similarity of more than 80%, and if there is none, it should return some value like null.
Can you help me with this?
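One way to get the threshold you describe, as a sketch reusing the question's variables: switch to a normalized match method and compare the raw peak score. Note that the Core.normalize() call has to go, because min-max normalization stretches the best score to 1 even when nothing matches.
int match_method = Imgproc.TM_CCOEFF_NORMED; // peak score is comparable across images
Mat result = new Mat();
Imgproc.matchTemplate(labReport, textMat, result, match_method);
MinMaxLocResult mmr = Core.minMaxLoc(result);
if (mmr.maxVal < 0.8) { // require at least ~80% similarity
    return null; // no acceptable match found
}
return mmr.maxLoc;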
I was tasked with creating an application, using OpenCV and C++, that takes an image of a plant leaf as input. The application should detect possible symptoms of disease on the leaf, like black/grey/brown spots, blights, lesions, etc. Each characteristic of a disease, such as the color of the spots, represents a different disease. After detecting the possible symptoms, the application will match them against the collection of template images in the application's database and output the best possible match.
What methods do I have to use for this? I've researched histogram matching and keypoint/descriptor matching, but I'm not sure which one will work best.
I have found sample code using SURF and FLANN, but I don't know if this would be enough:
#include <stdio.h>
#include <iostream>
#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/features2d.hpp"
using namespace cv;
void readme();
/**
 * @function main
 * @brief Main function
 */
int main( int argc, char** argv )
{
if( argc != 3 )
{ readme(); return -1; }
Mat img_1 = imread( argv[1], CV_LOAD_IMAGE_GRAYSCALE );
Mat img_2 = imread( argv[2], CV_LOAD_IMAGE_GRAYSCALE );
if( !img_1.data || !img_2.data )
{ std::cout<< " --(!) Error reading images " << std::endl; return -1; }
//-- Step 1: Detect the keypoints using SURF Detector
int minHessian = 400;
SurfFeatureDetector detector( minHessian );
std::vector<KeyPoint> keypoints_1, keypoints_2;
detector.detect( img_1, keypoints_1 );
detector.detect( img_2, keypoints_2 );
//-- Step 2: Calculate descriptors (feature vectors)
SurfDescriptorExtractor extractor;
Mat descriptors_1, descriptors_2;
extractor.compute( img_1, keypoints_1, descriptors_1 );
extractor.compute( img_2, keypoints_2, descriptors_2 );
//-- Step 3: Matching descriptor vectors using FLANN matcher
FlannBasedMatcher matcher;
std::vector< DMatch > matches;
matcher.match( descriptors_1, descriptors_2, matches );
double max_dist = 0; double min_dist = 100;
//-- Quick calculation of max and min distances between keypoints
for( int i = 0; i < descriptors_1.rows; i++ )
{ double dist = matches[i].distance;
if( dist < min_dist ) min_dist = dist;
if( dist > max_dist ) max_dist = dist;
}
printf("-- Max dist : %f \n", max_dist );
printf("-- Min dist : %f \n", min_dist );
//-- Draw only "good" matches (i.e. whose distance is less than 2*min_dist,
//-- or a small arbitary value ( 0.02 ) in the event that min_dist is very
//-- small)
//-- PS.- radiusMatch can also be used here.
std::vector< DMatch > good_matches;
for( int i = 0; i < descriptors_1.rows; i++ )
{ if( matches[i].distance <= max(2*min_dist, 0.02) )
{ good_matches.push_back( matches[i]); }
}
//-- Draw only "good" matches
Mat img_matches;
drawMatches( img_1, keypoints_1, img_2, keypoints_2,
good_matches, img_matches, Scalar::all(-1), Scalar::all(-1),
vector<char>(), DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );
//-- Show detected matches
imshow( "Good Matches", img_matches );
for( int i = 0; i < (int)good_matches.size(); i++ )
{ printf( "-- Good Match [%d] Keypoint 1: %d -- Keypoint 2: %d \n", i, good_matches[i].queryIdx, good_matches[i].trainIdx ); }
waitKey(0);
return 0;
}
/**
 * @function readme
 */
void readme()
{ std::cout << " Usage: ./SURF_FlannMatcher <img1> <img2>" << std::endl; }
Here are my questions:
What method do I have to use? Histogram matching, keypoint/descriptor matching, or something else?
If I use keypoint/descriptor matching, which algorithm is the best alternative to SURF and FLANN, given that I will ALSO be implementing it on an Android platform? Do I still have to perform thresholding or segmentation? Won't that remove important details such as color, shape, etc.? Please suggest some steps for doing this.
I think this approach should give you good results:
Training process:
Extract LBP descriptors for every pixel of the image (they can be computed for color images too).
Compute histograms of the LBP descriptors for each training sample.
Train a classifier using the histograms as inputs and the labels as outputs.
Prediction process:
Extract LBP descriptors for every pixel of the new image.
Compute the histogram of LBP descriptors for this image.
Feed the histogram to the classifier -> get results.
I've successfully used a feed-forward neural network as the classifier to solve a similar problem.
You may find this book useful: ISBN 978-0-85729-747-1, "Computer Vision Using Local Binary Patterns".
Try this (it computes LBP descriptors; there is also a function for computing the histogram):
#include <iostream>
#include <opencv2/opencv.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/features2d/features2d.hpp>
#include "opencv2/nonfree/nonfree.hpp"
#include <limits>
using namespace cv;
class myLBP
{
public:
uchar lut[256];
uchar null;
int radius;
int maxTransitions;
bool rotationInvariant;
myLBP(int _radius=1,int _maxTransitions=8,bool _rotationInvariant=false)
{
radius=_radius;
maxTransitions=_maxTransitions;
rotationInvariant=_rotationInvariant;
bool set[256];
uchar uid = 0;
for (int i=0; i<256; i++)
{
if (numTransitions(i) <= maxTransitions)
{
int id;
if (rotationInvariant)
{
int rie = rotationInvariantEquivalent(i);
if (i == rie)
{
id = uid++;
}
else
{
id = lut[rie];
}
}
else
{
id = uid++;
}
lut[i] = id;
set[i] = true;
}
else
{
set[i] = false;
}
}
null = uid;
for (int i=0; i<256; i++)
if (!set[i])
{
lut[i] = null; // Set to null id
}
}
/* Returns the number of 0->1 or 1->0 transitions in i */
static int numTransitions(int i)
{
int transitions = 0;
int curParity = i%2;
for (int j=1; j<=8; j++)
{
int parity = (i>>(j%8)) % 2;
if (parity != curParity)
{
transitions++;
}
curParity = parity;
}
return transitions;
}
static int rotationInvariantEquivalent(int i)
{
int min = std::numeric_limits<int>::max();
for (int j=0; j<8; j++)
{
bool parity = i % 2;
i = i >> 1;
if (parity)
{
i+=128;
}
min = std::min(min, i);
}
return min;
}
void process(const Mat &src, Mat &dst) const
{
Mat m;
src.convertTo(m, CV_32F);
assert(m.isContinuous() && (m.channels() == 1));
Mat n(m.rows, m.cols, CV_8UC1);
n = null; // Initialize to NULL LBP pattern
const float *p = (const float*)m.ptr();
for (int r=radius; r<m.rows-radius; r++)
{
for (int c=radius; c<m.cols-radius; c++)
{
const float cval = (p[(r+0*radius)*m.cols+c+0*radius]);
n.at<uchar>(r, c) = lut[(p[(r-1*radius)*m.cols+c-1*radius] >= cval ? 128 : 0) |
(p[(r-1*radius)*m.cols+c+0*radius] >= cval ? 64 : 0) |
(p[(r-1*radius)*m.cols+c+1*radius] >= cval ? 32 : 0) |
(p[(r+0*radius)*m.cols+c+1*radius] >= cval ? 16 : 0) |
(p[(r+1*radius)*m.cols+c+1*radius] >= cval ? 8 : 0) |
(p[(r+1*radius)*m.cols+c+0*radius] >= cval ? 4 : 0) |
(p[(r+1*radius)*m.cols+c-1*radius] >= cval ? 2 : 0) |
(p[(r+0*radius)*m.cols+c-1*radius] >= cval ? 1 : 0)];
}
}
dst=n.clone();
}
/* Returns the number of 1 bits in i */
static int bitCount(int i)
{
int count = 0;
for (int j=0; j<8; j++)
{
count += (i>>j)%2;
}
return count;
}
void draw(const Mat &src, Mat &dst) const
{
static Mat hueLUT, saturationLUT, valueLUT;
if (!hueLUT.data)
{
const int NUM_COLORS = 10;
hueLUT.create(1, 256, CV_8UC1);
hueLUT.setTo(0);
uchar uid = 0;
for (int i=0; i<256; i++)
{
const int transitions = numTransitions(i);
int u2;
if (transitions <= 2)
{
u2 = uid++;
}
else
{
u2 = 58;
}
// Assign hue based on bit count
int color = bitCount(i);
if (transitions > 2)
{
color = NUM_COLORS-1;
}
hueLUT.at<uchar>(0, u2) = 255.0*(float)color/(float)NUM_COLORS;
}
saturationLUT.create(1, 256, CV_8UC1);
saturationLUT.setTo(255);
valueLUT.create(1, 256, CV_8UC1);
valueLUT.setTo(255.0*(3.0/4.0));
}
if (src.type() != CV_8UC1)
{
std::cout << "Expected 8UC1 source type.";
}
Mat hue, saturation, value;
LUT(src, hueLUT, hue);
LUT(src, saturationLUT, saturation);
LUT(src, valueLUT, value);
std::vector<Mat> mv;
mv.push_back(hue);
mv.push_back(saturation);
mv.push_back(value);
Mat coloredU2;
merge(mv, coloredU2);
cvtColor(coloredU2, dst, cv::COLOR_HSV2BGR);
}
};
void Hist(const Mat &src, Mat &dst,float max=256, float min=0,int dims=-1)
{
std::vector<Mat> mv;
split(src, mv);
Mat m(mv.size(), dims, CV_32FC1);
for (size_t i=0; i<mv.size(); i++)
{
int channels[] = {0};
int histSize[] = {dims};
float range[] = {min, max};
const float* ranges[] = {range};
Mat hist, chan = mv[i];
// calcHist requires F or U, might as well convert just in case
if (mv[i].depth() != CV_8U && mv[i].depth() != CV_32F)
{
mv[i].convertTo(chan, CV_32F);
}
calcHist(&chan, 1, channels, Mat(), hist, 1, histSize, ranges);
memcpy(m.ptr(i), hist.ptr(), dims * sizeof(float));
}
dst=m.clone();
}
int main(int argc, char* argv[])
{
cv::initModule_nonfree();
cv::namedWindow("result");
cv::Mat bgr_img = cv::imread("D:\\ImagesForTest\\lena.jpg");
if (bgr_img.empty())
{
exit(EXIT_FAILURE);
}
cv::Mat gray_img;
cv::cvtColor(bgr_img, gray_img, cv::COLOR_BGR2GRAY);
cv::normalize(gray_img, gray_img, 0, 255, cv::NORM_MINMAX);
myLBP lbp(1,2);
Mat lbp_img;
lbp.process(gray_img,lbp_img);
lbp.draw(lbp_img,bgr_img);
//for(int i=0;i<lbp_img.rows;++i)
imshow("result",bgr_img);
cv::waitKey();
return 0;
}
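The code above stops at the descriptor and histogram stage. For the classifier step, here is a minimal sketch using k-nearest neighbours on the histograms, written in Java since the question also targets Android; the class name and the choice of k-NN (instead of the feed-forward network mentioned above) are only illustrative:
import org.opencv.core.Mat;
import org.opencv.ml.KNearest;
import org.opencv.ml.Ml;

public class LbpHistogramClassifier {
    private final KNearest knn = KNearest.create();

    // trainData: one CV_32F LBP histogram per row; labels: one CV_32S class id per row
    public void train(Mat trainData, Mat labels) {
        knn.train(trainData, Ml.ROW_SAMPLE, labels);
    }

    // queryHist: a single 1 x N CV_32F histogram; returns the predicted class id
    public float predict(Mat queryHist) {
        Mat results = new Mat();
        return knn.findNearest(queryHist, 3, results); // k = 3 neighbours
    }
}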
I'm developing for Android using OpenCV in Eclipse. I'm trying to do template matching frame by frame, but I can't convert the template and run the template match on it. I'm using this function:
public void initialize() {
    if (src.empty())
        return;
    if (template == null) {
        Mat templ = Highgui.imread(getFileAbsPath("1.png"),
                Highgui.CV_LOAD_IMAGE_UNCHANGED);
        template = new Mat(templ.size(), CvType.CV_32F);
        Imgproc.cvtColor(templ, (Mat) template, Imgproc.COLOR_BGR2RGBA);
    }
}
private String getFileAbsPath(String fileName) {
    File f = new File(cacheDir, fileName);
    return f.getAbsolutePath();
}
I get an error on:
Imgproc.cvtColor(templ, (Mat) template, Imgproc.COLOR_BGR2RGBA);
My image is this (it's the number 1):
https://drive.google.com/file/d/0B5tH_Qo3-GvhaV9QSTUteXFiQmM/view?usp=sharing
Next, here is my method:
@Override
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    src = inputFrame.rgba();
    initialize();
    int match_method = Imgproc.TM_SQDIFF;
    // Create the result matrix
    int result_cols = src.cols() - ((Mat) template).cols() + 1;
    int result_rows = src.rows() - ((Mat) template).rows() + 1;
    Mat result = new Mat(result_rows, result_cols, CvType.CV_32F);
    // Do the Matching and Normalize
    Imgproc.matchTemplate(src, (Mat) template, result, match_method);
    Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
    MinMaxLocResult mmr = Core.minMaxLoc(result);
    Point matchLoc;
    if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
        matchLoc = mmr.minLoc;
    } else {
        matchLoc = mmr.maxLoc;
    }
    Rect roi = new Rect((int) matchLoc.x, (int) matchLoc.y, ((Mat) template).cols(), ((Mat) template).rows());
    Core.rectangle(src, new Point(roi.x, roi.y), new Point(roi.width - 2, roi.height - 2), new Scalar(255, 0, 0, 255), 2);
    return src;
}
I get an error on this line:
Imgproc.matchTemplate(src, (Mat) template, result, match_method);
I can't do the match and I don't know why... Can someone help me?
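Not a definitive fix, but two things are worth checking, because both throw on exactly those lines: Highgui.imread returning an empty Mat (cvtColor then throws), and a type mismatch in matchTemplate, since inputFrame.rgba() is CV_8UC4 and the template must have the same type. A sketch of the template setup with both addressed:
Mat templ = Highgui.imread(getFileAbsPath("1.png"), Highgui.CV_LOAD_IMAGE_UNCHANGED);
if (templ.empty()) {
    Log.e(TAG, "could not read template image"); // a bad path makes cvtColor throw
    return;
}
Mat rgbaTemplate = new Mat(); // no need to pre-allocate as CV_32F; cvtColor reallocates
Imgproc.cvtColor(templ, rgbaTemplate, Imgproc.COLOR_BGR2RGBA); // CV_8UC4, same type as src
template = rgbaTemplate;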