Android OpenCV people count - android

I am developing an Android application to count the number of people in a real-time video stream. I used OpenCV to detect people, but I have not found a way to count them. If anyone knows how to do this, please help me.
Here is the code that detects people in each incoming camera frame:
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat gray = new Mat();
    Mat originalFrame = inputFrame.rgba();
    // Rotate and flip the frame so it matches the portrait preview orientation
    Core.transpose(originalFrame, mRgbaT);
    Imgproc.resize(mRgbaT, mRgbaF, mRgbaF.size(), 0, 0, 0);
    Core.flip(mRgbaF, originalFrame, 1);
    Imgproc.medianBlur(originalFrame, originalFrame, 3);
    // The preview frame is RGBA, so convert accordingly
    Imgproc.cvtColor(originalFrame, gray, Imgproc.COLOR_RGBA2GRAY);
    HOGDescriptor hog = new HOGDescriptor();
    // Get the default people detector and set it on our descriptor
    MatOfFloat descriptors = HOGDescriptor.getDefaultPeopleDetector();
    hog.setSVMDetector(descriptors);
    MatOfRect locations = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();
    hog.detectMultiScale(gray, locations, weights);
    Point rectPoint1 = new Point();
    Point rectPoint2 = new Point();
    Point fontPoint = new Point();
    if (locations.rows() > 0) {
        List<Rect> rectangles = locations.toList();
        for (Rect rect : rectangles) {
            rectPoint1.x = rect.x;
            rectPoint1.y = rect.y;
            fontPoint.x = rect.x;
            fontPoint.y = rect.y - 4;
            rectPoint2.x = rect.x + rect.width;
            rectPoint2.y = rect.y + rect.height;
            final Scalar rectColor = new Scalar(0, 0, 0);
            // Draw the detection onto the frame
            Imgproc.rectangle(originalFrame, rectPoint1, rectPoint2, rectColor, 2);
        }
    }
    gray.release();
    return originalFrame;
}
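As for the counting itself: detectMultiScale returns one rectangle per detected person, so the per-frame count is simply locations.rows() (equivalently, locations.toArray().length). A minimal sketch that overlays the count on the frame before returning it; the text position and color are my own choices, not from the original code:
// Count is the number of detections returned by the HOG detector.
int peopleCount = locations.rows();   // or locations.toArray().length

// Overlay the count on the frame (position/colors are arbitrary choices).
Imgproc.putText(originalFrame,
        "People: " + peopleCount,
        new Point(10, 30),                     // near the top-left corner
        Imgproc.FONT_HERSHEY_SIMPLEX, 1.0,
        new Scalar(0, 255, 0), 2);
Note that this counts detections per frame; counting unique people over time would additionally require tracking detections across frames (for example, assigning IDs by rectangle overlap), which HOGDescriptor alone does not provide.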

Related

Unexpected results in attempt to perform Image Alignment | Image Registration | OpenCV | Android

I attempted to implement image alignment using OpenCV for Android. It worked on one of my test cases but failed to work on another.
Test Case #1 - for which my code worked:
Given two images, for example a Scene Image and an Object Image, I want the object image to be aligned to the scene image. Check out the final output.
Test Case #2 - for which my code didn't work:
Given a Scene Image and an Object Image, I got this feature matching result and this as my output of warpPerspective.
My question is: can anyone please tell me why I am unable to align the images in the second test case?
Mat mObjectMat = new Mat();
Mat mSceneMat = new Mat();
//Bitmap to Mat
Utils.bitmapToMat(inputImage2, mObjectMat);
Utils.bitmapToMat(inputImage1, mSceneMat);
//rgb to gray
Imgproc.cvtColor(mObjectMat, mObjectMat, Imgproc.COLOR_RGBA2GRAY);
Imgproc.cvtColor(mSceneMat, mSceneMat, Imgproc.COLOR_RGBA2GRAY);
//find interest points/keypoints in the images
MatOfKeyPoint keypoints_object = new MatOfKeyPoint();
MatOfKeyPoint keypoints_scene = new MatOfKeyPoint();
FeatureDetector fd = FeatureDetector.create(FeatureDetector.ORB);
fd.detect(mObjectMat, keypoints_object);
fd.detect(mSceneMat, keypoints_scene);
//extract descriptors
Mat descriptors_object = new Mat();
Mat descriptors_scene = new Mat();
DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
extractor.compute(mObjectMat, keypoints_object, descriptors_object);
extractor.compute(mSceneMat, keypoints_scene, descriptors_scene);
//match keypoint descriptors
MatOfDMatch matches = new MatOfDMatch();
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE_HAMMING);
matcher.match(descriptors_object, descriptors_scene, matches);
List<DMatch> matchesList = matches.toList();
//calculate max and min distances between keypoints
double maxDistance = 0.0;
double minDistance = 100.0;
for (int i = 0; i < descriptors_object.rows(); i++) {
    double dist = matchesList.get(i).distance;
    if (dist < minDistance) minDistance = dist;
    if (dist > maxDistance) maxDistance = dist;
}
//display
Toast.makeText(getApplicationContext(), "[ Max dist : " + maxDistance + " ] [ Min dist : " + minDistance + " ]", Toast.LENGTH_LONG).show();
//keep only good matches and draw them
LinkedList<DMatch> good_matches = new LinkedList<DMatch>();
MatOfDMatch gm = new MatOfDMatch();
for (int i = 0; i < descriptors_object.rows(); i++) {
    if (matchesList.get(i).distance < 3 * minDistance) {
        good_matches.addLast(matchesList.get(i));
    }
}
gm.fromList(good_matches);
//display matches on imageView
Mat opt = new Mat();
Scalar RED = new Scalar(255, 0, 0);
Scalar GREEN = new Scalar(0, 255, 0);
MatOfByte drawnMatches = new MatOfByte();
Features2d.drawMatches(mObjectMat, keypoints_object, mSceneMat, keypoints_scene, gm, opt, GREEN, RED, drawnMatches, Features2d.NOT_DRAW_SINGLE_POINTS);
//localize the object: collect the corresponding points from the good matches
List<KeyPoint> keypoints_objectList = keypoints_object.toList();
List<KeyPoint> keypoints_sceneList = keypoints_scene.toList();
LinkedList<Point> objList = new LinkedList<Point>();
LinkedList<Point> sceneList = new LinkedList<Point>();
MatOfPoint2f obj = new MatOfPoint2f();
MatOfPoint2f scene = new MatOfPoint2f();
for (int i = 0; i < good_matches.size(); i++) {
    objList.addLast(keypoints_objectList.get(good_matches.get(i).queryIdx).pt);
    sceneList.addLast(keypoints_sceneList.get(good_matches.get(i).trainIdx).pt);
}
obj.fromList(objList);
scene.fromList(sceneList);
//find homography - the perspective transformation between the two planes
Mat H = Calib3d.findHomography(obj, scene, Calib3d.RANSAC);
//perform the perspective warp
Mat imgWarped = new Mat();
Imgproc.warpPerspective(mObjectMat, imgWarped, H, mSceneMat.size());
//blend the warped and scene images
Mat finalImage = new Mat();
Core.add(mSceneMat, imgWarped, finalImage);
//show the result in the imageView
Bitmap imageMatched = Bitmap.createBitmap(finalImage.cols(), finalImage.rows(), Bitmap.Config.RGB_565); //need to save bitmap
Utils.matToBitmap(finalImage, imageMatched);
imageView1.setImageBitmap(imageMatched);
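One likely culprit in the failing case is the match filter: the 3*minDistance heuristic keeps many outliers when minDistance is large, and findHomography then fits a useless transform. A more robust filter (my suggestion, not part of the original post) is Lowe's ratio test via knnMatch:
// Lowe's ratio test: keep a match only if the best match is clearly
// better than the second-best. 0.75 is a commonly used threshold.
List<MatOfDMatch> knnMatches = new ArrayList<>();
matcher.knnMatch(descriptors_object, descriptors_scene, knnMatches, 2);

LinkedList<DMatch> ratioGood = new LinkedList<>();
for (MatOfDMatch pair : knnMatches) {
    DMatch[] m = pair.toArray();
    if (m.length >= 2 && m[0].distance < 0.75f * m[1].distance) {
        ratioGood.addLast(m[0]);
    }
}
If only a handful of matches survive the ratio test, the object is probably not reliably detectable in the scene, and the warpPerspective output will be meaningless regardless of the homography method.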

OpenCV deskewing a contour

InputImage
ResultImage
I have been able to filter the largest contour in the image to detect the token.
I have applied a perspective warp, but it only crops the image at the edges of the contour, nothing else.
I want the detected token to be cropped out of the rest of the image entirely and de-skewed while keeping proportions, so the resulting image should be upright and straight. Then I will move forward with finding the blobs in the token to detect the dates marked inside it.
private Mat processMat(Mat srcMat) {
    Mat processedMat = new Mat();
    Imgproc.cvtColor(srcMat, processedMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(processedMat, processedMat, new Size(5, 5), 5);
    Imgproc.threshold(processedMat, processedMat, 127, 255, Imgproc.THRESH_BINARY);
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(processedMat, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    // Pick the contour with the largest area
    double maxVal = 0;
    int maxValIdx = 0;
    for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
        double contourArea = Imgproc.contourArea(contours.get(contourIdx));
        if (maxVal < contourArea) {
            maxVal = contourArea;
            maxValIdx = contourIdx;
        }
    }
    if (!contours.isEmpty()) {
        Imgproc.drawContours(srcMat, contours, maxValIdx, new Scalar(0, 255, 0), 3);
        Rect rect = Imgproc.boundingRect(contours.get(maxValIdx));
        Log.e("rect", "" + rect);
        // These checks reduce to the four corners of the bounding rect
        int top = srcMat.height();
        int left = srcMat.width();
        int right = 0;
        int bottom = 0;
        if (rect.x < left) {
            left = rect.x;
        }
        if (rect.x + rect.width > right) {
            right = rect.x + rect.width;
        }
        if (rect.y < top) {
            top = rect.y;
        }
        if (rect.y + rect.height > bottom) {
            bottom = rect.y + rect.height;
        }
        Point topLeft = new Point(left, top);
        Point topRight = new Point(right, top);
        Point bottomRight = new Point(right, bottom);
        Point bottomLeft = new Point(left, bottom);
        return warp(srcMat, topLeft, topRight, bottomLeft, bottomRight);
    }
    return null;
}
Mat warp(Mat inputMat, Point topLeft, Point topRight, Point bottomLeft, Point bottomRight) {
    int resultWidth = (int) (topRight.x - topLeft.x);
    int bottomWidth = (int) (bottomRight.x - bottomLeft.x);
    if (bottomWidth > resultWidth)
        resultWidth = bottomWidth;
    int resultHeight = (int) (bottomLeft.y - topLeft.y);
    int bottomHeight = (int) (bottomRight.y - topRight.y);
    if (bottomHeight > resultHeight) {
        resultHeight = bottomHeight;
    }
    // Mat takes (rows, cols), i.e. (height, width)
    Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC1);
    List<Point> source = new ArrayList<>();
    source.add(topLeft);
    source.add(topRight);
    source.add(bottomLeft);
    source.add(bottomRight);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Point ocvPOut1 = new Point(0, 0);
    Point ocvPOut2 = new Point(resultWidth, 0);
    Point ocvPOut3 = new Point(0, resultHeight);
    Point ocvPOut4 = new Point(resultWidth, resultHeight);
    List<Point> dest = new ArrayList<>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight));
    return outputMat;
}
UPDATE 1
Replaced This:
return warp(srcMat, topLeft, topRight, bottomLeft, bottomRight);
With This:
return warp(srcMat, topLeft, topRight, bottomRight, bottomLeft);
Result Update 1:
UPDATE 2
public Mat warp(Mat inputMat, MatOfPoint selectedContour) {
    MatOfPoint2f new_mat = new MatOfPoint2f(selectedContour.toArray());
    MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
    int contourSize = (int) selectedContour.total();
    Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
    double[] temp_double;
    temp_double = approxCurve_temp.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    List<Point> source = new ArrayList<Point>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    int resultWidth = 846;
    int resultHeight = 2048;
    // Mat takes (rows, cols), i.e. (height, width)
    Mat outputMat = new Mat(resultHeight, resultWidth, CvType.CV_8UC4);
    Point ocvPOut1 = new Point(0, 0);
    Point ocvPOut2 = new Point(0, resultHeight);
    Point ocvPOut3 = new Point(resultWidth, resultHeight);
    Point ocvPOut4 = new Point(resultWidth, 0);
    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform,
            new Size(resultWidth, resultHeight), Imgproc.INTER_CUBIC);
    return outputMat;
}
Result Update 2:
I have changed my warp function a bit; the updated code is attached.
However, the resulting image is somehow rotated in the wrong direction. Can you guide me on the correct way to do this?
The Android device orientation is set to portrait, and the input image is in portrait as well.
UPDATE 3
I have managed to straighten the token by sorting the corners like so:
List<Point> source = new ArrayList<Point>();
source.add(p2);
source.add(p3);
source.add(p4);
source.add(p1);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Result Update 3:
However, the resulting image is cropped on the left side, and I have no idea how to tackle that.
I have managed to straighten the input image when the token is tilted to the right or left, and the output image is straight nonetheless. However, if the input image already has the token centred and straight up, the same code rotates the token like so:
Issue Update 3:
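The corner-order problems in Updates 2 and 3 can be avoided by ordering the four corners deterministically instead of relying on the order approxPolyDP happens to return them in. A standard trick (my sketch, not code from the post): the top-left corner has the smallest x+y, the bottom-right the largest x+y, the top-right the smallest y-x, and the bottom-left the largest y-x.
// Order four corner points as top-left, top-right, bottom-right, bottom-left,
// regardless of the order approxPolyDP returned them in.
static Point[] orderCorners(Point[] pts) {
    Point tl = pts[0], tr = pts[0], br = pts[0], bl = pts[0];
    for (Point p : pts) {
        if (p.x + p.y < tl.x + tl.y) tl = p;   // smallest sum  -> top-left
        if (p.x + p.y > br.x + br.y) br = p;   // largest sum   -> bottom-right
        if (p.y - p.x < tr.y - tr.x) tr = p;   // smallest diff -> top-right
        if (p.y - p.x > bl.y - bl.x) bl = p;   // largest diff  -> bottom-left
    }
    return new Point[]{tl, tr, br, bl};
}
The destination points then have to be listed in the same order; with a consistent ordering, the token comes out upright whether or not it was tilted.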
The transformation needed to deskew the ticket is close to an affine one. You can obtain it by approximating the outline with a parallelogram, whose vertices you find as the leftmost, topmost, rightmost and bottommost points of the contour.
Actually, you just need three vertices (the fourth can be recomputed from these). Maybe a least-squares fitting of the parallelogram is possible; I don't know.
Another option is to use a homographic transform, which is defined from four points (but the computation is much more complex). It will take perspective into account. (You might get some insight here: https://www.codeproject.com/Articles/674433/Perspective-Projection-of-a-Rectangle-Homography.)
To straighten up the image, it suffices to apply the inverse transform and retrieve a rectangle. Note that the size of this rectangle is unknown, so you can scale it arbitrarily. The hardest issue is finding a suitable aspect ratio.
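A minimal sketch of the extreme-point idea above, assuming selectedContour is the largest contour found in processMat:
// Find the leftmost, topmost, rightmost and bottommost points of the contour;
// these approximate the vertices of the enclosing parallelogram.
Point[] pts = selectedContour.toArray();
Point leftmost = pts[0], rightmost = pts[0], topmost = pts[0], bottommost = pts[0];
for (Point p : pts) {
    if (p.x < leftmost.x)   leftmost = p;
    if (p.x > rightmost.x)  rightmost = p;
    if (p.y < topmost.y)    topmost = p;
    if (p.y > bottommost.y) bottommost = p;
}
// For a parallelogram only three of these are independent; the fourth vertex
// can be recomputed, e.g. bottommost is approximately leftmost + rightmost - topmost.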

Not detecting filled circle, showing empty circle instead - OpenCV

I am new to OpenCV and trying to make an OMR scanner from scratch. I have a snippet which is supposed to detect filled circles, but it is doing quite the opposite and detecting unfilled circles. Please show me my mistake in the code.
public void showFilledCircles(Bitmap paramView)
{
    // the passed-in bitmap is immediately replaced by the file contents
    paramView = BitmapFactory.decodeFile(filename);
    Mat localMat1 = new Mat();
    Utils.bitmapToMat(paramView, localMat1);
    Mat blurred = new Mat();
    Imgproc.GaussianBlur(localMat1, blurred, new Size(3.0, 3.0), 3.0, 2.5);
    Mat gray = new Mat();
    Imgproc.cvtColor(blurred, gray, Imgproc.COLOR_RGB2GRAY); // raw constant 7 in the original
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Mat edges = new Mat();
    Imgproc.Canny(gray, edges, 140.0, 255.0);
    // the original used the raw constants 1 and 2 here
    Imgproc.findContours(edges, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    for (int i = 0; i < contours.size(); i++)
    {
        Imgproc.drawContours(gray, contours, i, new Scalar(0.0, 0.0, 255.0), 2);
    }
    Mat mask = new Mat();
    Core.inRange(gray, new Scalar(70.0, 70.0, 70.0), new Scalar(255.0, 255.0, 255.0), mask);
    Mat circles = new Mat();
    // the original used the raw constant 3 for the Hough method
    Imgproc.HoughCircles(mask, circles, Imgproc.HOUGH_GRADIENT, 1.0, 20.0, 40.0, 10.0, 6, 18);
    for (int i = 0; i < circles.cols(); i++)
    {
        double[] c = circles.get(0, i);
        if (c == null) continue;
        Point center = new Point(Math.round(c[0]), Math.round(c[1]));
        int radius = (int) Math.round(c[2]);
        Log.i("circle points ---------", center + " radius " + radius);
        Imgproc.circle(localMat1, center, 1, new Scalar(0.0, 0.0, 255.0), 5);
        Imgproc.circle(localMat1, center, radius, new Scalar(255.0, 0.0, 0.0), 5);
    }
    Utils.matToBitmap(localMat1, paramView);
    this.imageView.setImageBitmap(paramView);
}
output image
For more precise detection of filled circles:
step 1: detect contours in the image.
step 2: create a rectangle around each detected contour.
step 3: depending on the rectangle's height and width, keep only the contours you want.
For filled-circle detection, refer to this question:
Detect filled circle using opencv4Android
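To tell filled from unfilled bubbles after step 3, one option (my sketch, not from the linked question) is to measure how dark each bounding rectangle is in an inverted binary image:
// Decide whether a circle is filled by measuring how dark its bounding
// rectangle is. 'binaryInv' is assumed to be a THRESH_BINARY_INV image of
// the sheet, so pencil marks are white (255) on a black background.
boolean isFilled(Mat binaryInv, Rect box) {
    Mat roi = binaryInv.submat(box);
    double fillRatio = (double) Core.countNonZero(roi) / (box.width * box.height);
    roi.release();
    return fillRatio > 0.5; // threshold is a rough guess; tune on real sheets
}
This also explains the behaviour in the question: Canny responds to edges, so both filled and empty circles produce the same ring-like outlines, and only inspecting the interior distinguishes them.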

OpenCV Histogram Matching of Color Image

I am trying to write an Android app that performs histogram matching of color images using OpenCV 3.1. I found this code example in C++ and converted it to Java.
I tried to match each RGB channel separately, but it did not give me the desired results. So now I'm converting the images to YUV color space and then matching the Y channels. What I hope to achieve is brightness matching, so if the source is brighter than the target I will get a darker image.
So far it seems that the Y histogram of the final (output) image is somewhat close to the Y histogram of the target image, but the actual output doesn't look like the target.
Here is the relevant code:
private Mat calculateLUT(Mat in_cdf_mat, Mat dst_cdf_mat) {
    int last = 0;
    double epsilon = Double.parseDouble(epsilonTextView.getText().toString()); // epsilon set to 0.01
    Mat M = new Mat(256, 1, CvType.CV_8UC1);
    for (int j = 0; j < in_cdf_mat.rows(); j++) {
        double F1j = in_cdf_mat.get(j, 0)[0];
        for (int k = last; k < dst_cdf_mat.rows(); k++) {
            double F2k = dst_cdf_mat.get(k, 0)[0];
            if (Math.abs(F2k - F1j) < epsilon || F2k > F1j) {
                double[] data = {k};
                M.put(j, 0, data);
                last = k;
                break;
            }
        }
    }
    return M;
}
private void calculateCDF(Mat channel, Mat cdf)
{
    // channel holds the histogram. The index represents the pixel value
    // and the entry is the number of pixels of that value in the image.
    for (int i = 1; i < 256; i++) {
        double[] data = new double[1];
        data[0] = cdf.get(i - 1, 0)[0] + channel.get(i, 0)[0];
        cdf.put(i, 0, data);
    }
}
private void calcHistogram(String imgPath, Mat y_hist, Mat y_cdf) {
    Mat image;
    Mat ycrcb = new Mat();
    image = Imgcodecs.imread(imgPath);
    // Note: Imgcodecs.imread returns BGR data, so COLOR_BGR2YCrCb may be
    // what is actually intended here.
    Imgproc.cvtColor(image, ycrcb, Imgproc.COLOR_RGB2YCrCb);
    image.release();
    List<Mat> ycrcbChannels = new ArrayList<>();
    Core.split(ycrcb, ycrcbChannels);
    List<Mat> yList = new ArrayList<>();
    yList.add(ycrcbChannels.get(0));
    MatOfInt histSize = new MatOfInt(256);
    MatOfFloat histRange = new MatOfFloat(0f, 256f);
    Imgproc.calcHist(yList, new MatOfInt(0), new Mat(), y_hist, histSize, histRange, false);
    Core.normalize(y_hist, y_hist, 3, 255, Core.NORM_MINMAX);
    calculateCDF(y_hist, y_cdf);
    Core.normalize(y_cdf, y_cdf, 3, 255, Core.NORM_MINMAX);
}
private void transformLight(Mat inputImage, Mat outputImage, Mat ylut) {
    Mat imageYCrCb = new Mat();
    Imgproc.cvtColor(inputImage, imageYCrCb, Imgproc.COLOR_RGB2YCrCb);
    Mat y_chanel = new Mat();
    Core.extractChannel(imageYCrCb, y_chanel, 0);
    Mat cr_chanel = new Mat();
    Core.extractChannel(imageYCrCb, cr_chanel, 1);
    Mat cb_chanel = new Mat();
    Core.extractChannel(imageYCrCb, cb_chanel, 2);
    // Remap the luma channel through the lookup table; chroma is untouched.
    Core.LUT(y_chanel, ylut, y_chanel);
    ArrayList<Mat> ycrcbDest = new ArrayList<>();
    ycrcbDest.add(y_chanel);
    ycrcbDest.add(cr_chanel);
    ycrcbDest.add(cb_chanel);
    Core.merge(ycrcbDest, outputImage);
    Imgproc.cvtColor(outputImage, outputImage, Imgproc.COLOR_YCrCb2RGB);
}
private static void drawLine(Mat mat, int i, long bin_w, int hist_h, Mat histImage, Scalar color) {
    // bin_w set to 1
    Point p0 = new Point(bin_w * (i - 1), hist_h - Math.round(mat.get(i - 1, 0)[0]));
    Point p1 = new Point(bin_w * (i), hist_h - Math.round(mat.get(i, 0)[0]));
    Imgproc.line(histImage, p0, p1, color, 5, 8, 0);
}
private void drawHistogram(Mat histImage, Mat graph, Scalar color) {
    for (int i = 1; i < 256; i++) {
        drawLine(graph, i, bin_w, histImage.rows(), histImage, color);
    }
}
private void histNCDFtoFile(String filename, Mat hist, Mat cdf, Scalar histColor, Scalar cdfColor) {
    Mat histImage = new Mat(256, 256, CvType.CV_8UC3);
    drawHistogram(histImage, hist, histColor);
    drawHistogram(histImage, cdf, cdfColor);
    saveImage(filename, histImage);
}
private Mat matchHistograms(String input, String target) {
    Mat input_y_hist = new Mat();
    Mat target_y_hist = new Mat();
    calcHistogram(input, input_y_hist, input_y_cdf_mat);
    histNCDFtoFile("inputHistNCDF.jpg", input_y_hist, input_y_cdf_mat, inputHistColor, inputCDFColor);
    calcHistogram(target, target_y_hist, target_y_cdf_mat);
    histNCDFtoFile("targetHistNCDF.jpg", target_y_hist, target_y_cdf_mat, targetHistColor, targetCDFColor);
    Mat ylut = calculateLUT(input_y_cdf_mat, target_y_cdf_mat);
    Mat image;
    Mat dst = new Mat(); // this Mat will hold the transformed image
    image = Imgcodecs.imread(input);
    transformLight(image, dst, ylut);
    return dst;
}
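For reference, this is roughly how the pipeline would be invoked; a hedged usage sketch where the file paths and resultImageView are hypothetical placeholders, not from the original post:
// Hypothetical usage: match the brightness of input.jpg to target.jpg
// and display the result. Paths are placeholders.
Mat matched = matchHistograms("/sdcard/input.jpg", "/sdcard/target.jpg");
Imgproc.cvtColor(matched, matched, Imgproc.COLOR_BGR2RGB); // imread gives BGR; Bitmap wants RGB
Bitmap out = Bitmap.createBitmap(matched.cols(), matched.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(matched, out);
resultImageView.setImageBitmap(out); // resultImageView is a hypothetical ImageView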
Here is an example image from pixabay that I want to transform:
And this is the image that I use as a target:
And this is the result:
The CDFs of the result and the target are shown here; the light green is the target CDF and the light blue is the result CDF.

Android OpenCV FeatureDetection -> runFeatureHomography giving messed up result image after matching

I'm pretty new to OpenCV, and at the moment I'm trying to get object detection running on an Android device. What I basically do is display a camera preview in my app, and when I click on it, it captures a picture. Then this picture is given to the runFeatureHomography method, which first grabs the second image that the taken image has to be compared to. The method then finds the keypoints in both pictures, computes their descriptors and matches them into one Mat called img_matches.
About as basic as it can be, I guess.
The object I'm trying to detect at the moment is some kind of card, in the format of a credit card. The card is blue and has lots of white and yellow text on it. I can only post one link, which is why I can't show pictures of the cards.
I don't know why, but when I display the result at the end / or save the result as a bitmap to my phone, it always looks something like this:
http://oi44.tinypic.com/oaqel0.jpg <-- result image after everything is done.
This shows me that the object I wanted to detect was indeed detected, but I don't know why there is a black background instead of the pictures of the cards. Why doesn't it show my two images the way they are, just with all the lines on them?
In my code I'm using these three:
FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
Here is some of my code:
private void runFeatureHomography(Bitmap image)
{
    Mat img_object = getObjectImage();
    Mat img_scene = newEmptyMat();
    Bitmap myimg = image.copy(Bitmap.Config.ARGB_8888, true);
    Utils.bitmapToMat(myimg, img_scene);
    MatOfKeyPoint keyPoints_object = detectObjectKeyPoints();
    MatOfKeyPoint keyPoints_scene = new MatOfKeyPoint();
    this.detector.detect(img_scene, keyPoints_scene);
    Mat descriptors_object = calculateObjectDescriptor();
    Mat descriptors_scene = newEmptyMat();
    this.extractor.compute(img_scene, keyPoints_scene, descriptors_scene);
    MatOfDMatch matches = new MatOfDMatch();
    matcher.match(descriptors_object, descriptors_scene, matches);
    DMatch[] matchesArray = matches.toArray();
    double min_dist = Double.MAX_VALUE;
    for (int i = 0; i < descriptors_object.rows(); i++)
    {
        double dist = matchesArray[i].distance;
        if (dist < min_dist)
        {
            min_dist = dist;
        }
    }
    List<DMatch> good_matches = new ArrayList<DMatch>();
    for (int i = 0; i < descriptors_object.rows(); i++)
    {
        if (matchesArray[i].distance <= 3 * min_dist)
        {
            good_matches.add(matchesArray[i]);
        }
    }
    Mat img_matches = newEmptyMat();
    Features2d.drawMatches(img_object, keyPoints_object, img_scene, keyPoints_scene,
            new MatOfDMatch(good_matches.toArray(new DMatch[good_matches.size()])),
            img_matches, Scalar.all(-1), Scalar.all(-1), new MatOfByte(),
            Features2d.NOT_DRAW_SINGLE_POINTS);
    List<Point> object = new ArrayList<Point>();
    List<Point> scene = new ArrayList<Point>();
    KeyPoint[] keyPointsObjectArray = keyPoints_object.toArray();
    KeyPoint[] keyPointsSceneArray = keyPoints_scene.toArray();
    for (int i = 0; i < good_matches.size(); i++)
    {
        object.add(keyPointsObjectArray[good_matches.get(i).queryIdx].pt);
        scene.add(keyPointsSceneArray[good_matches.get(i).trainIdx].pt);
    }
    Mat H = Calib3d.findHomography(
            new MatOfPoint2f(object.toArray(new Point[object.size()])),
            new MatOfPoint2f(scene.toArray(new Point[scene.size()])),
            Calib3d.RANSAC, 3);
    // Corners of the object image, to be projected into the scene
    Point[] object_corners = new Point[4];
    object_corners[0] = new Point(0, 0);
    object_corners[1] = new Point(img_object.cols(), 0);
    object_corners[2] = new Point(img_object.cols(), img_object.rows());
    object_corners[3] = new Point(0, img_object.rows());
    MatOfPoint2f scene_corners2f = new MatOfPoint2f();
    Core.perspectiveTransform(new MatOfPoint2f(object_corners), scene_corners2f, H);
    Point[] scene_corners = scene_corners2f.toArray();
    // Shift the projected corners right by the object image width, because
    // drawMatches places the scene image to the right of the object image.
    Point[] scene_corners_norm = new Point[4];
    scene_corners_norm[0] = new Point(scene_corners[0].x + img_object.cols(), scene_corners[0].y);
    scene_corners_norm[1] = new Point(scene_corners[1].x + img_object.cols(), scene_corners[1].y);
    scene_corners_norm[2] = new Point(scene_corners[2].x + img_object.cols(), scene_corners[2].y);
    scene_corners_norm[3] = new Point(scene_corners[3].x + img_object.cols(), scene_corners[3].y);
    Core.line(img_matches, scene_corners_norm[0], scene_corners_norm[1], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[1], scene_corners_norm[2], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[2], scene_corners_norm[3], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[3], scene_corners_norm[0], new Scalar(0, 255, 0), 4);
    bmp = Bitmap.createBitmap(img_matches.cols(), img_matches.rows(), Bitmap.Config.ARGB_8888);
    Intent resultIntent = new Intent("com.example.capturetest.Result");
    startActivity(resultIntent);
}
private volatile Mat cachedObjectDescriptor = null;
private volatile MatOfKeyPoint cachedObjectKeyPoints = null;
private volatile Mat cachedObjectImage = null;
private Mat calculateObjectDescriptor()
{
    Mat objectDescriptor = this.cachedObjectDescriptor;
    if (objectDescriptor == null)
    {
        Mat objectImage = getObjectImage();
        MatOfKeyPoint objectKeyPoints = detectObjectKeyPoints();
        objectDescriptor = newEmptyMat();
        this.extractor.compute(objectImage, objectKeyPoints, objectDescriptor);
        this.cachedObjectDescriptor = objectDescriptor;
    }
    return objectDescriptor;
}
private MatOfKeyPoint detectObjectKeyPoints()
{
    MatOfKeyPoint objectKeyPoints = this.cachedObjectKeyPoints;
    if (objectKeyPoints == null)
    {
        Mat objectImage = getObjectImage();
        objectKeyPoints = new MatOfKeyPoint();
        this.detector.detect(objectImage, objectKeyPoints);
        this.cachedObjectKeyPoints = objectKeyPoints;
    }
    return objectKeyPoints;
}
private Mat getObjectImage()
{
    Mat objectImage = this.cachedObjectImage;
    if (objectImage == null)
    {
        objectImage = newEmptyMat();
        Bitmap bitmap = ((BitmapDrawable) iv.getDrawable()).getBitmap();
        Bitmap img = bitmap.copy(Bitmap.Config.ARGB_8888, false);
        Utils.bitmapToMat(img, objectImage);
        this.cachedObjectImage = objectImage;
    }
    return objectImage;
}
private Mat newEmptyMat()
{
    return new Mat();
}
After the line matcher.match(descriptors_object, descriptors_scene, matches); I tried to convert the three Mats img_object, img_scene and matches to bitmaps and saved them to my Android device just for checking. They all look as they are supposed to, so up to this point everything is fine.
But after this part...
Mat img_matches = newEmptyMat();
Features2d.drawMatches(img_object, keyPoints_object, img_scene, keyPoints_scene,
        new MatOfDMatch(good_matches.toArray(new DMatch[good_matches.size()])),
        img_matches, Scalar.all(-1), Scalar.all(-1), new MatOfByte(),
        Features2d.NOT_DRAW_SINGLE_POINTS);
... I tried to convert the Mat img_matches (which is supposed to hold all the information of the two input pictures, if I understand it correctly) to a bitmap and save it on my Android device, but the picture looks like the picture in the link above (black pictures with lines instead of card pictures with lines).
Does any of you know what I'm doing wrong here? I seem to be stuck at the moment.
Thanks in advance, guys.
Edit:
Just wanted to let you know that I got the same code running and WORKING as a normal Java program on my desktop, with the picture taken from a webcam there. The result image is displayed absolutely correctly in the desktop program, with cards and lines instead of black and lines ;)
Alright, found a working way:
Imgproc.cvtColor(img_object, img_object, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(img_scene, img_scene, Imgproc.COLOR_RGBA2RGB);
It seems that after converting my Bitmaps to Mats, I have to use the above two lines to convert them from RGBA to RGB. It also works with RGBA to GRAY if you prefer gray pictures.
It seems that the RGBA format does not work in this case.
Hope this helps anybody coming here from Google.
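In other words, Features2d.drawMatches is happy with 1- or 3-channel images, while Utils.bitmapToMat produces a 4-channel RGBA Mat. A small helper that captures the fix (my wrapper, not from the original post):
// Load an Android Bitmap as a 3-channel RGB Mat, since Features2d.drawMatches
// renders a black background when handed 4-channel RGBA images.
static Mat bitmapToRgbMat(Bitmap bitmap) {
    Mat rgba = new Mat();
    Utils.bitmapToMat(bitmap, rgba);               // always yields RGBA
    Mat rgb = new Mat();
    Imgproc.cvtColor(rgba, rgb, Imgproc.COLOR_RGBA2RGB);
    rgba.release();
    return rgb;
}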
