Object detection on Android using YOLO in OpenCV

I am a beginner with OpenCV on Android. I want to detect objects using YOLO. The thing is, I don't want to use OpenCV's camera listener onCameraFrame to get the frames; I want to use the onPreviewFrame listener instead. As you know, that callback delivers the frames as a byte[], so I don't know how to feed a byte[] frame into YOLO through OpenCV.
Here is the code I used, and it works well:
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat frame = inputFrame.rgba();
    Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2RGB);
    Mat imageBlob = Dnn.blobFromImage(frame, 0.00392, new Size(416, 416), new Scalar(0, 0, 0), false, false);
    tinyYOLO.setInput(imageBlob);
    List<Mat> result = new ArrayList<>(2);
    List<String> outBlobNames = new ArrayList<>();
    outBlobNames.add(0, "yolo_16");
    outBlobNames.add(1, "yolo_23");
    tinyYOLO.forward(result, outBlobNames);
    // ... SOME CODE HERE
}
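
For completeness, here is a hedged sketch of what the elided postprocessing typically looks like with OpenCV's DNN module and a YOLO network. It assumes the usual row layout [center_x, center_y, width, height, objectness, class scores...], with box coordinates normalized to the frame size; the 0.5 threshold is an arbitrary choice, not from the original code:

// A sketch of typical YOLO postprocessing, assuming the standard OpenCV DNN
// output layout; the confidence threshold (0.5) is an arbitrary assumption.
for (Mat level : result) {
    for (int r = 0; r < level.rows(); r++) {
        // Columns 5.. hold one score per class; take the best one.
        Mat scores = level.row(r).colRange(5, level.cols());
        Core.MinMaxLocResult mm = Core.minMaxLoc(scores);
        if (mm.maxVal > 0.5) {
            // Box coordinates are normalized; scale back to the frame.
            int centerX = (int) (level.get(r, 0)[0] * frame.cols());
            int centerY = (int) (level.get(r, 1)[0] * frame.rows());
            int w = (int) (level.get(r, 2)[0] * frame.cols());
            int h = (int) (level.get(r, 3)[0] * frame.rows());
            Imgproc.rectangle(frame,
                    new Point(centerX - w / 2, centerY - h / 2),
                    new Point(centerX + w / 2, centerY + h / 2),
                    new Scalar(0, 255, 0), 2);
        }
    }
}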
And now I want to use onPreviewFrame:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    if (data == null)
        throw new NullPointerException();
    //---------------------HERE I don't know what to do---------------------
    Camera.Size size = camera.getParameters().getPreviewSize();
    Mat frame = new Mat(size.width, size.width, CvType.CV_8UC1);
    frame.put(0, 0, data);
    Imgproc.cvtColor(frame, frame, Imgproc.COLOR_RGBA2RGB);
    //---------------------UP-------------------------------
    Mat imageBlob = Dnn.blobFromImage(frame, 0.00392, new Size(416, 416), new Scalar(0, 0, 0), false, false);
    tinyYOLO.setInput(imageBlob);
    final List<Mat> result = new ArrayList<>(2);
    final List<String> outBlobNames = new ArrayList<>();
    outBlobNames.add(0, "yolo_16");
    outBlobNames.add(1, "yolo_23");
    tinyYOLO.forward(result, outBlobNames);
    // ... SOME CODE HERE
}
With the second code I get this error:
2020-01-23 15:26:55.279 30268-30268/com.thelonecoder.camera E/AndroidRuntime: FATAL EXCEPTION: main
Process: com.thelonecoder.camera, PID: 30268
CvException [org.opencv.core.CvException: cv::Exception: OpenCV(3.4.5) /build/3_4_pack-android/opencv/modules/imgproc/src/color.hpp:255: error: (-2:Unspecified error) in function 'cv::CvtHelper::CvtHelper(cv::InputArray, cv::OutputArray, int) [with VScn = cv::Set<3, 4>; VDcn = cv::Set<3, 4>; VDepth = cv::Set<0, 2, 5>; cv::SizePolicy sizePolicy = (cv::SizePolicy)2u; cv::InputArray = const cv::_InputArray&; cv::OutputArray = const cv::_OutputArray&]'
Invalid number of channels in input image:
'VScn::contains(scn)'
where
'scn' is 1
]

Finally, I found my answer: the preview data is NV21 (YUV), not RGBA, so the buffer has to be wrapped as a single-channel Mat with height * 3/2 rows and converted with the matching YUV color code:
Mat frame = new Mat(size.height + size.height / 2, size.width, CvType.CV_8UC1);
frame.put(0, 0, data);
Imgproc.cvtColor(frame, frame, Imgproc.COLOR_YUV2RGB_NV21);
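
Putting it all together, here is a minimal sketch of the full onPreviewFrame path, assuming the default NV21 preview format and the same tinyYOLO network as above; this is a sketch, not the exact original code:

// A minimal sketch, assuming the default NV21 preview format and a
// tinyYOLO net loaded elsewhere; names mirror the snippets above.
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    if (data == null) return;
    Camera.Size size = camera.getParameters().getPreviewSize();

    // Wrap the NV21 buffer: one channel, height * 3/2 rows.
    Mat yuv = new Mat(size.height + size.height / 2, size.width, CvType.CV_8UC1);
    yuv.put(0, 0, data);

    // Convert to 3-channel RGB, which blobFromImage expects here.
    Mat rgb = new Mat();
    Imgproc.cvtColor(yuv, rgb, Imgproc.COLOR_YUV2RGB_NV21);
    yuv.release();

    Mat imageBlob = Dnn.blobFromImage(rgb, 0.00392, new Size(416, 416), new Scalar(0, 0, 0), false, false);
    tinyYOLO.setInput(imageBlob);
    List<Mat> result = new ArrayList<>(2);
    List<String> outBlobNames = new ArrayList<>();
    outBlobNames.add("yolo_16");
    outBlobNames.add("yolo_23");
    tinyYOLO.forward(result, outBlobNames);
    rgb.release();
    // ... postprocess result as in the onCameraFrame version
}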

Related

Android OpenCV can't IDFT back to original image

Hello, I am trying to transform an image from the time domain to the frequency domain using OpenCV's DFT() function. That works, but I can't transform it back using the IDFT() function.
In my getIDFT() function, I get the phase from the DFT complex output and use polarToCart to recover the Re and Im data, but something is wrong with my image. Can somebody help me? Thanks.
GitHub: https://github.com/pighaddt/opencv_demo/tree/master/app/src
private Mat getIDFT(Mat DFTimage) {
    DFTimage.convertTo(DFTimage, CvType.CV_64FC1);
    DFTimage = DFTShift(DFTimage);
    Mat Re = new Mat(DFTimage.size(), CvType.CV_64FC1); // real part
    Mat Im = new Mat(DFTimage.size(), CvType.CV_64FC1); // imaginary part
    Mat ph = new Mat(DFTimage.size(), CvType.CV_64FC1);
    // planes and DFTData are member fields filled in when the forward DFT was computed
    Core.phase(planes.get(0), planes.get(1), ph, false);
    Core.polarToCart(DFTData, ph, Re, Im, false);
    Mat complexI = Mat.zeros(Re.rows(), Re.cols(), CvType.CV_64FC2);
    Mat complexI3 = Mat.zeros(Re.rows(), Re.cols(), CvType.CV_64FC2);
    List<Mat> planesIDFT = new ArrayList<Mat>();
    planesIDFT.add(Re);
    planesIDFT.add(Im);
    Core.merge(planesIDFT, complexI); // merge Re and Im into one two-channel complex Mat
    Mat invDFTcvt = new Mat(Re.size(), CvType.CV_8UC1);
    Core.idft(complexI, complexI3, Core.DFT_INVERSE | Core.DFT_COMPLEX_OUTPUT | Core.DFT_SCALE, 0);
    Core.split(complexI3, planesIDFT); // planes[0] = Re(IDFT), planes[1] = Im(IDFT)
    Core.normalize(planesIDFT.get(0), invDFTcvt, 0, 255, Core.NORM_MINMAX);
    invDFTcvt.convertTo(invDFTcvt, CvType.CV_8UC1);
    isDFT = false;
    return invDFTcvt;
}
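
For reference, Core.dft and Core.idft invert each other only if the complex spectrum is passed back unmodified; any shift or magnitude/phase decomposition has to be undone exactly before the inverse transform. Here is a minimal sketch of a round trip that does work, assuming an 8-bit grayscale input (dftRoundTrip is an illustrative name, not from the project):

// A minimal DFT -> IDFT round-trip sketch, assuming a single-channel
// 8-bit grayscale Mat as input. Key point: keep the raw complex output
// of Core.dft and pass it unmodified to Core.idft.
private Mat dftRoundTrip(Mat gray) {
    Mat floatImg = new Mat();
    gray.convertTo(floatImg, CvType.CV_64FC1);

    // Forward transform with complex output (2 channels: Re, Im).
    Mat complex = new Mat();
    Core.dft(floatImg, complex, Core.DFT_COMPLEX_OUTPUT, 0);

    // Inverse transform straight from the complex spectrum;
    // DFT_REAL_OUTPUT drops the (near-zero) imaginary part.
    Mat restored = new Mat();
    Core.idft(complex, restored, Core.DFT_SCALE | Core.DFT_REAL_OUTPUT, 0);

    Mat out = new Mat();
    restored.convertTo(out, CvType.CV_8UC1);
    return out;
}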

Pupil detection project with OpenCV & Android-Studio gets ArrayIndexOutOfBoundsException

I am working on an Android Studio project, and one of the objectives is detecting a person's pupils. I'm trying to use OpenCV. I know there are many questions on this site about detecting circles or eyes with OpenCV, but every time I run my code, the app crashes with an error that nobody else seems to report:
2021-03-30 21:44:08.178 19272-19500/com.android.unipi.camerawifiprova E/AndroidRuntime: FATAL EXCEPTION: Thread-7
Process: com.android.unipi.camerawifiprova, PID: 19272
java.lang.ArrayIndexOutOfBoundsException: length=1; index=1
I share my code in the methods onCameraViewStarted() and onCameraFrame().
@Override
public void onCameraViewStarted(int width, int height) {
    dst = new Mat();
    matGray = new Mat();
    circles = new Mat();
}

@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    Core.transpose(mRgba, mRgbaT);
    Core.flip(mRgbaT, dst, 1);
    Imgproc.resize(mRgbaT, mRgbaT, mRgba.size());
    mRgba.release();
    mRgbaT.release();
    Bitmap resultBitmap;
    resultBitmap = Bitmap.createBitmap(dst.cols(), dst.rows(), Bitmap.Config.ARGB_8888);
    Utils.matToBitmap(dst, resultBitmap);
    matGray = new Mat(resultBitmap.getWidth(), resultBitmap.getHeight(), CvType.CV_8UC1);
    Utils.bitmapToMat(resultBitmap, matGray);
    int colorChannels = (matGray.channels() == 3) ? Imgproc.COLOR_BGR2GRAY
            : ((matGray.channels() == 4) ? Imgproc.COLOR_BGRA2GRAY : 1);
    Imgproc.cvtColor(matGray, matGray, colorChannels);
    circles = new Mat(resultBitmap.getWidth(), resultBitmap.getHeight(), CvType.CV_8UC1);
    Imgproc.GaussianBlur(matGray, matGray, new Size(9, 9), 2, 2);
    double dp = 1d;
    double minDist = 20;
    int minRadius = 0, maxRadius = 0;
    double param1 = 105;
    double param2 = 40;
    Imgproc.HoughCircles(matGray, circles, Imgproc.CV_HOUGH_GRADIENT, dp, minDist,
            param1, param2, minRadius, maxRadius);
    int numCircles = (circles.rows() == 0) ? 0 : circles.cols();
    for (int i = 0; i < numCircles; i++) {
        double[] circlePar = circles.get(0, i);
        int x = (int) circlePar[0], y = (int) circlePar[1]; // -> ArrayIndexOutOfBoundsException!!
        Point center = new Point(x, y);
        int radius = (int) circlePar[2];
        Imgproc.circle(dst, center, radius, new Scalar(0, 0, 255), 4);
    }
    matGray.release();
    circles.release();
    return dst;
}
I printed the variable circlePar: it should have three values (the center coordinates and the radius), but I got only one, [0.0]. Maybe the program cannot detect any circles. I tried an easy case (detecting a single coin on a desk), but the app still crashes for the above reason.
After some time, I figured it out on my own. The problem is just the following line:
circles = new Mat(resultBitmap.getWidth(), resultBitmap.getHeight(), CvType.CV_8UC1);
I must not pre-allocate the circles matrix with the dimensions of the image. After removing that line, the program works fine: I can detect circles, coins, and even the iris of an eye in real time.
The result of HoughCircles is a matrix of dimension 1xN, where N is the number of detected circles, and every element of the matrix is a vector of three values: the coordinates of the center and the radius.
I know Hough is not the best method for detecting the position of the pupil, but it is fine for my project.
One more thing: to get the gray image we could just use the method inputFrame.gray(), but do not forget to transpose that image too:
matGray = inputFrame.gray();
Core.transpose(matGray,matGray);
Core.flip(matGray,matGray,1);
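
To make the 1xN output handling explicit, here is a small sketch of reading the circles defensively, assuming circles comes straight from Imgproc.HoughCircles and dst is the frame being drawn on:

// A minimal sketch of reading HoughCircles output defensively;
// each column of the 1xN result holds [center_x, center_y, radius].
for (int i = 0; i < circles.cols(); i++) {
    double[] c = circles.get(0, i);
    if (c == null || c.length < 3) continue; // skip malformed entries
    Point center = new Point(c[0], c[1]);
    int radius = (int) Math.round(c[2]);
    Imgproc.circle(dst, center, radius, new Scalar(0, 0, 255), 4);
}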

I need to create a Bitmap in C code in the NDK

My issue is that, for a video call, I receive the frames in my C code as an I420 byte array, which I then convert to NV21 and send to Java to create the Bitmap. But because I need to create a YuvImage from the byte array, and then a Bitmap from that, I have a conversion overhead that causes delays and a loss in quality.
I am wondering if there is another way to do this: somehow create the Bitmap directly in the C code, and maybe even draw it to a view or a SurfaceView from the C code? Or simply send the Bitmap to my function so I can set it there, without needing to create the Bitmap on the Android side.
This is what I do with the byte array in the C code:
if (size == 0)
    return;
jboolean isAttached;
JNIEnv *env;
jint jParticipant;
jint jWidth;
jint jHeight;
jbyteArray jRawImageBytes;
env = getJniEnv(&isAttached);
if (env == NULL)
    goto FAIL0;
//LOGE(".... **** ....TRYING TO FIND CALLBACK");
LOGI("FrameReceived will reach here 1");
char *modifiedRawImageBytes = malloc(size);
memcpy(modifiedRawImageBytes, rawImageBytes, size);
jint sizeWH = width * height;
jint quarter = sizeWH / 4;
jint v0 = sizeWH + quarter;
// Interleave the planar U and V planes of I420 into the VU pairs of NV21.
for (int u = sizeWH, v = v0, o = sizeWH; u < v0; u++, v++, o += 2) {
    modifiedRawImageBytes[o] = rawImageBytes[v];     // For NV21, V first
    modifiedRawImageBytes[o + 1] = rawImageBytes[u]; // For NV21, U second
}
if (remote) {
    if (frameReceivedRemoteMethod == NULL)
        frameReceivedRemoteMethod = getApplicationJniMethodId(env, applicationJniObj, "vidyoConferenceFrameReceivedRemoteCallback", "(III[B)V");
    if (frameReceivedRemoteMethod == NULL) {
        //LOGE(".... **** ....CALLBACK NOT FOUND");
        goto FAIL1;
    }
}
This is what I do in the Android Java code:
remoteResolution = width + "x" + height;
remoteBAOS = new ByteArrayOutputStream();
remoteYUV = new YuvImage(rawImageBytes, ImageFormat.NV21, width, height, null);
remoteYUV.compressToJpeg(new Rect(0, 0, width, height), 100, remoteBAOS);
remoteBA = remoteBAOS.toByteArray();
remoteBitmap = BitmapFactory.decodeByteArray(remoteBA, 0, remoteBA.length);
new Handler(Looper.getMainLooper()).post(new Runnable() {
    @Override
    public void run() {
        remoteView.setImageBitmap(remoteBitmap);
    }
});
This is how the sample app of the SDK I am using did it, but I feel this is not best practice at all, and there has to be a way to get the Bitmap from the byte array more quickly, preferably in the C code. Any ideas on how to improve this?
EDIT:
I modified my Java code. I now use this library: https://github.com/silvaren/easyrs
so my code becomes:
remoteBitmap = Nv21Image.nv21ToBitmap(rs, rawImageBytes, width, height);
new Handler(Looper.getMainLooper()).post(new Runnable() {
    @Override
    public void run() {
        remoteView.setImageBitmap(remoteBitmap);
    }
});
Where nv21ToBitmap does this:
public static Bitmap yuvToRgb(RenderScript rs, Nv21Image nv21Image) {
    long startTime = System.currentTimeMillis();
    Type.Builder yuvTypeBuilder = new Type.Builder(rs, Element.U8(rs))
            .setX(nv21Image.nv21ByteArray.length);
    Type yuvType = yuvTypeBuilder.create();
    Allocation yuvAllocation = Allocation.createTyped(rs, yuvType, Allocation.USAGE_SCRIPT);
    yuvAllocation.copyFrom(nv21Image.nv21ByteArray);
    Type.Builder rgbTypeBuilder = new Type.Builder(rs, Element.RGBA_8888(rs));
    rgbTypeBuilder.setX(nv21Image.width);
    rgbTypeBuilder.setY(nv21Image.height);
    Allocation rgbAllocation = Allocation.createTyped(rs, rgbTypeBuilder.create());
    ScriptIntrinsicYuvToRGB yuvToRgbScript = ScriptIntrinsicYuvToRGB.create(rs, Element.RGBA_8888(rs));
    yuvToRgbScript.setInput(yuvAllocation);
    yuvToRgbScript.forEach(rgbAllocation);
    Bitmap bitmap = Bitmap.createBitmap(nv21Image.width, nv21Image.height, Bitmap.Config.ARGB_8888);
    rgbAllocation.copyTo(bitmap);
    Log.d("NV21", "Conversion to Bitmap: " + (System.currentTimeMillis() - startTime) + "ms");
    return bitmap;
}
This is faster, but I feel there is still some delay now that I get my bitmap from RenderScript instead of a YuvImage. Is it possible to set it on my ImageView faster somehow, or to draw it on a SurfaceView?
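
One option, sketched here as an assumption rather than a verified fix: bypass the ImageView and draw each Bitmap straight onto a SurfaceView's canvas from the decoding thread, which avoids posting to the main looper for every frame. Here, surfaceView is a hypothetical field backed by an android.view.SurfaceView:

// A sketch, not a verified fix: draw each decoded Bitmap directly on a
// SurfaceView. 'surfaceView' is a hypothetical field; the surface must
// already be created (track this with a SurfaceHolder.Callback).
private void drawFrame(Bitmap bitmap) {
    SurfaceHolder holder = surfaceView.getHolder();
    Canvas canvas = holder.lockCanvas();
    if (canvas == null) return; // surface not ready yet
    try {
        canvas.drawBitmap(bitmap, 0, 0, null);
    } finally {
        holder.unlockCanvasAndPost(canvas);
    }
}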

Get video pixel data from Exoplayer in Android

Is it possible to grab the pixel data (e.g. as an RGB byte array) from a running video within ExoPlayer? Ideally at the real video resolution, not the size of the displayed View. I want to forward that data to OpenCV for image-processing purposes.
Alternatively, I'm looking for a robust (ffmpeg-based) Android framework to feed videos into OpenCV, where the input might be IP cameras, local files, online files, or online streams.
Any help is appreciated.
WARNING this solution currently only works on my Android 8 emulator because of this.
OK, here is my solution. I'm using the SimpleExoPlayer with a custom MediaCodecVideoRenderer. I'm not binding the player to a SimpleExoPlayerView, because that is not allowed if I want to grab the image data manually. Within my custom MediaCodecVideoRenderer I override processOutputBuffer and use getOutputImage to get a standard Android Image. I then send it through my OpenCV code and transform it back into an Android Bitmap using the OpenCV Utils whenever I need to. Careful: this code requires API >= 21, and it does not output any audio.
private void startPlayer(Context context, Uri uri) {
    BandwidthMeter bandwidthMeter = new DefaultBandwidthMeter();
    TrackSelection.Factory videoTrackSelectionFactory = new AdaptiveTrackSelection.Factory(bandwidthMeter);
    TrackSelector trackSelector = new DefaultTrackSelector(videoTrackSelectionFactory);
    SimpleExoPlayer player = ExoPlayerFactory.newSimpleInstance((eventHandler, videoRendererEventListener, audioRendererEventListener, textRendererOutput, metadataRendererOutput) -> {
        return new Renderer[]{new CustomMediaCodecVideoRenderer(context, MediaCodecSelector.DEFAULT, 1000, eventHandler, videoRendererEventListener, 100)};
    }, trackSelector);
    DataSource.Factory dataSourceFactory = new DefaultDataSourceFactory(context, Util.getUserAgent(context, context.getPackageName()));
    ExtractorMediaSource videoSource = new ExtractorMediaSource.Factory(dataSourceFactory).createMediaSource(uri);
    LoopingMediaSource loopingMediaSource = new LoopingMediaSource(videoSource);
    player.setPlayWhenReady(true);
    player.prepare(loopingMediaSource);
}
class CustomMediaCodecVideoRenderer extends MediaCodecVideoRenderer {
    CustomMediaCodecVideoRenderer(Context context, MediaCodecSelector mediaCodecSelector, long allowedJoiningTimeMs, @Nullable Handler eventHandler, @Nullable VideoRendererEventListener eventListener, int maxDroppedFrameCountToNotify) {
        super(context, mediaCodecSelector, allowedJoiningTimeMs, eventHandler, eventListener, maxDroppedFrameCountToNotify);
    }

    @Override
    protected boolean processOutputBuffer(long positionUs, long elapsedRealtimeUs, MediaCodec codec, ByteBuffer buffer, int bufferIndex, int bufferFlags, long bufferPresentationTimeUs, boolean shouldSkip) throws ExoPlaybackException {
        Image image = codec.getOutputImage(bufferIndex);
        if (image != null) {
            Log.d(MainActivity.class.getName(), "test");
            Mat mat = convertYuv420888ToMat(image, false);
            // TODO do your thing with the OpenCV Mat
        }
        return super.processOutputBuffer(positionUs, elapsedRealtimeUs, codec, buffer, bufferIndex, bufferFlags, bufferPresentationTimeUs, shouldSkip);
    }
}
I found the Image to Mat conversion here.
private Mat convertYuv420888ToMat(Image image, boolean isGreyOnly) {
    int width = image.getWidth();
    int height = image.getHeight();
    Image.Plane yPlane = image.getPlanes()[0];
    int ySize = yPlane.getBuffer().remaining();
    if (isGreyOnly) {
        byte[] data = new byte[ySize];
        yPlane.getBuffer().get(data, 0, ySize);
        Mat greyMat = new Mat(height, width, CvType.CV_8UC1);
        greyMat.put(0, 0, data);
        return greyMat;
    }
    Image.Plane uPlane = image.getPlanes()[1];
    Image.Plane vPlane = image.getPlanes()[2];
    // be aware that this size does not include the padding at the end, if there is any
    // (e.g. if pixel stride is 2 the size is ySize / 2 - 1)
    int uSize = uPlane.getBuffer().remaining();
    int vSize = vPlane.getBuffer().remaining();
    byte[] data = new byte[ySize + (ySize / 2)];
    yPlane.getBuffer().get(data, 0, ySize);
    ByteBuffer ub = uPlane.getBuffer();
    ByteBuffer vb = vPlane.getBuffer();
    int uvPixelStride = uPlane.getPixelStride(); // stride guaranteed to be the same for u and v planes
    if (uvPixelStride == 1) {
        uPlane.getBuffer().get(data, ySize, uSize);
        vPlane.getBuffer().get(data, ySize + uSize, vSize);
        Mat yuvMat = new Mat(height + (height / 2), width, CvType.CV_8UC1);
        yuvMat.put(0, 0, data);
        Mat rgbMat = new Mat(height, width, CvType.CV_8UC3);
        Imgproc.cvtColor(yuvMat, rgbMat, Imgproc.COLOR_YUV2RGB_I420, 3);
        yuvMat.release();
        return rgbMat;
    }
    // if pixel stride is 2 there is padding between each pixel
    // converting it to NV21 by filling the gaps of the v plane with the u values
    vb.get(data, ySize, vSize);
    for (int i = 0; i < uSize; i += 2) {
        data[ySize + i + 1] = ub.get(i);
    }
    Mat yuvMat = new Mat(height + (height / 2), width, CvType.CV_8UC1);
    yuvMat.put(0, 0, data);
    Mat rgbMat = new Mat(height, width, CvType.CV_8UC3);
    Imgproc.cvtColor(yuvMat, rgbMat, Imgproc.COLOR_YUV2RGB_NV21, 3);
    yuvMat.release();
    return rgbMat;
}
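
The answer above mentions transforming the Mat back into an Android Bitmap with the OpenCV Utils; for reference, a minimal sketch of that step, where rgbMat stands for the CV_8UC3 result of the conversion:

// A minimal sketch of the Mat -> Bitmap step mentioned above;
// Utils.matToBitmap handles CV_8UC1/3/4 Mats of matching size.
Bitmap bmp = Bitmap.createBitmap(rgbMat.cols(), rgbMat.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(rgbMat, bmp);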

Android OpenCV FeatureDetection -> runFeatureHomography giving messed up result image after matching

I'm pretty new to OpenCV, and at the moment I'm trying to get object detection running on an Android device. What I basically do is display a camera preview in my app, and when I click on it, it captures a picture. This picture is then given to the runFeatureHomography method, which first grabs the second image that the captured one has to be compared against. The method then finds the keypoints in both pictures, computes their descriptors, and matches them into one Mat called img_matches. As basic as it can be, I guess.
The object I'm trying to detect at the moment is a card, in the format of a credit card. The card is blue and has lots of white and yellow text on it. I can only post one link, which is why I can't show pictures of the cards themselves.
I don't know why, but when I display the result in the end / save the result as a bitmap to my phone, it always looks kind of like this:
http://oi44.tinypic.com/oaqel0.jpg <-- result image after everything is done.
This shows me that the object I wanted to detect was indeed detected, but I don't know why there is a black background and not the pictures of the cards. Why doesn't it show my two images the way they are, just with all the lines drawn on them?
In my code I'm using these three:
FeatureDetector detector = FeatureDetector.create(FeatureDetector.ORB);
DescriptorExtractor extractor = DescriptorExtractor.create(DescriptorExtractor.ORB);
DescriptorMatcher matcher = DescriptorMatcher.create(DescriptorMatcher.BRUTEFORCE);
Here is some of my code:
private void runFeatureHomography(Bitmap image) {
    Mat img_object = getObjectImage();
    Mat img_scene = newEmptyMat();
    Bitmap myimg = image.copy(Bitmap.Config.ARGB_8888, true);
    Utils.bitmapToMat(myimg, img_scene);
    MatOfKeyPoint keyPoints_object = detectObjectKeyPoints();
    MatOfKeyPoint keyPoints_scene = new MatOfKeyPoint();
    this.detector.detect(img_scene, keyPoints_scene);
    Mat descriptors_object = calculateObjectDescriptor();
    Mat descriptors_scene = newEmptyMat();
    this.extractor.compute(img_scene, keyPoints_scene, descriptors_scene);
    MatOfDMatch matches = new MatOfDMatch();
    matcher.match(descriptors_object, descriptors_scene, matches);
    double min_dist = Double.MAX_VALUE;
    for (int i = -1; ++i < descriptors_object.rows();) {
        double dist = matches.toArray()[i].distance;
        if (dist < min_dist) {
            min_dist = dist;
        }
    }
    List<DMatch> good_matches = new ArrayList<DMatch>();
    for (int i = -1; ++i < descriptors_object.rows();) {
        if (matches.toArray()[i].distance <= 3 * min_dist) {
            good_matches.add(matches.toArray()[i]);
        }
    }
    System.out.println("4");
    Mat img_matches = newEmptyMat();
    Features2d.drawMatches(img_object, keyPoints_object, img_scene, keyPoints_scene,
            new MatOfDMatch(good_matches.toArray(new DMatch[good_matches.size()])),
            img_matches, Scalar.all(-1), Scalar.all(-1), new MatOfByte(),
            Features2d.NOT_DRAW_SINGLE_POINTS);
    List<Point> object = new ArrayList<Point>();
    List<Point> scene = new ArrayList<Point>();
    for (int i = -1; ++i < good_matches.size();) {
        object.add(keyPoints_object.toArray()[good_matches.get(i).queryIdx].pt);
        scene.add(keyPoints_scene.toArray()[good_matches.get(i).trainIdx].pt);
    }
    Mat H = Calib3d.findHomography(
            new MatOfPoint2f(object.toArray(new Point[object.size()])),
            new MatOfPoint2f(scene.toArray(new Point[scene.size()])),
            Calib3d.RANSAC, 3);
    Point[] object_corners = new Point[4];
    object_corners[0] = new Point(0, 0);
    object_corners[1] = new Point(img_object.cols(), 0);
    object_corners[2] = new Point(img_object.cols(), img_object.rows());
    object_corners[3] = new Point(0, img_object.rows());
    MatOfPoint2f scene_corners2f = new MatOfPoint2f();
    Core.perspectiveTransform(new MatOfPoint2f(object_corners), scene_corners2f, H);
    Point[] scene_corners = scene_corners2f.toArray();
    Point[] scene_corners_norm = new Point[4];
    scene_corners_norm[0] = new Point(scene_corners[0].x + img_object.cols(), scene_corners[0].y);
    scene_corners_norm[1] = new Point(scene_corners[1].x + img_object.cols(), scene_corners[1].y);
    scene_corners_norm[2] = new Point(scene_corners[2].x + img_object.cols(), scene_corners[2].y);
    scene_corners_norm[3] = new Point(scene_corners[3].x + img_object.cols(), scene_corners[3].y);
    Core.line(img_matches, scene_corners_norm[0], scene_corners_norm[1], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[1], scene_corners_norm[2], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[2], scene_corners_norm[3], new Scalar(0, 255, 0), 4);
    Core.line(img_matches, scene_corners_norm[3], scene_corners_norm[0], new Scalar(0, 255, 0), 4);
    bmp = Bitmap.createBitmap(img_matches.cols(), img_matches.rows(), Bitmap.Config.ARGB_8888);
    Intent resultIntent = new Intent("com.example.capturetest.Result");
    startActivity(resultIntent);
}

private volatile Mat cachedObjectDescriptor = null;
private volatile MatOfKeyPoint cachedObjectKeyPoints = null;
private volatile Mat cachedObjectImage = null;

private Mat calculateObjectDescriptor() {
    Mat objectDescriptor = this.cachedObjectDescriptor;
    if (objectDescriptor == null) {
        Mat objectImage = getObjectImage();
        MatOfKeyPoint objectKeyPoints = detectObjectKeyPoints();
        objectDescriptor = newEmptyMat();
        this.extractor.compute(objectImage, objectKeyPoints, objectDescriptor);
        this.cachedObjectDescriptor = objectDescriptor;
    }
    return objectDescriptor;
}

private MatOfKeyPoint detectObjectKeyPoints() {
    MatOfKeyPoint objectKeyPoints = this.cachedObjectKeyPoints;
    if (objectKeyPoints == null) {
        Mat objectImage = getObjectImage();
        objectKeyPoints = new MatOfKeyPoint();
        this.detector.detect(objectImage, objectKeyPoints);
        this.cachedObjectKeyPoints = objectKeyPoints;
    }
    return objectKeyPoints;
}

private Mat getObjectImage() {
    Mat objectImage = this.cachedObjectImage;
    if (objectImage == null) {
        objectImage = newEmptyMat();
        Bitmap bitmap = ((BitmapDrawable) iv.getDrawable()).getBitmap();
        Bitmap img = bitmap.copy(Bitmap.Config.ARGB_8888, false);
        Utils.bitmapToMat(img, objectImage);
        this.cachedObjectImage = objectImage;
    }
    return objectImage;
}

private Mat newEmptyMat() {
    return new Mat();
}
After the line matcher.match(descriptors_object, descriptors_scene, matches); I tried converting the three Mats img_object, img_scene and matches to bitmaps and saved them to my Android device just to check. They all look as they are supposed to, so up to this point everything is fine.
But after this part...
Mat img_matches = newEmptyMat();
Features2d.drawMatches(img_object, keyPoints_object, img_scene, keyPoints_scene,
        new MatOfDMatch(good_matches.toArray(new DMatch[good_matches.size()])),
        img_matches, Scalar.all(-1), Scalar.all(-1), new MatOfByte(),
        Features2d.NOT_DRAW_SINGLE_POINTS);
... I tried to convert the Mat img_matches (which is supposed to contain all the information of the two input pictures, if I understand it right) to a bitmap and save it on my Android device, but the picture looks like the one in the link above (black pictures with lines instead of card pictures with lines).
Does anyone know what I'm doing wrong here? I seem to be stuck at the moment.
Thanks in advance, guys.
Edit:
Just wanted to let you know that I got the same code running and WORKING as a normal Java program on my desktop, with the picture taken from the webcam there. The result image is displayed absolutely correctly in the desktop program, with cards and lines instead of black and lines ;)
Alright, I found a working way:
Imgproc.cvtColor(img_object, img_object, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(img_scene, img_scene, Imgproc.COLOR_RGBA2RGB);
It seems that after converting my Bitmaps to Mats, I have to use the above two lines to convert them from RGBA to RGB. It also works with RGBA to GRAY if you prefer gray pictures. The 4-channel RGBA format apparently does not work with drawMatches in this case.
Hope this helps anybody coming here from Google.
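
In context, that means converting right after bitmapToMat and before any detection or drawing. A minimal sketch, reusing the names from the question:

// A minimal sketch, reusing names from the question: convert to
// 3-channel RGB immediately after bitmapToMat, before detection/drawing.
Utils.bitmapToMat(myimg, img_scene);
Imgproc.cvtColor(img_scene, img_scene, Imgproc.COLOR_RGBA2RGB);
// img_object needs the same treatment where it is loaded, in getObjectImage().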
