Converting inputFrame from Camera to HSI using OpenCV in Android

I want to convert an image from the Android camera to HSI format using OpenCV.
The problem occurs when I use the following method:
private Mat rgb2hsi(Mat rgbFrame) {
    Mat hsiFrame = rgbFrame.clone();
    for (int i = 0; i < rgbFrame.rows(); ++i) {
        for (int j = 0; j < rgbFrame.cols(); ++j) {
            double[] rgb = rgbFrame.get(i, j);
            Log.d(MAINTAG, "rgbFrame.get(i, j) array size = " + rgb.length);
            double colorR = rgb[0];
            double colorG = rgb[1];
            double colorB = rgb[2];
            double minRGB = Math.min(colorR, Math.min(colorG, colorB));
            double colorI = (colorR + colorG + colorB) / 3;
            double colorS = 0.0;
            if (colorI > 0) colorS = 1.0 - (minRGB / colorI);
            double colorH;
            double const1 = colorR - (colorG / 2) - (colorB / 2);
            double const2 = Math.sqrt(Math.pow(colorR, 2) + Math.pow(colorG, 2) + Math.pow(colorB, 2)
                    - (colorR * colorG) - (colorR * colorB) - (colorG * colorB));
            // acos() returns radians; convert to degrees before the 360 - H wrap
            colorH = Math.toDegrees(Math.acos(const1 / const2));
            if (colorB > colorG) colorH = 360 - colorH;
            double[] hsi = {colorH, colorS, colorI};
            hsiFrame.put(i, j, hsi);
        }
    }
    return hsiFrame;
}
It throws an error:
java.lang.UnsupportedOperationException: Provided data element number (3) should be multiple of the Mat channels count (4)
I searched for a while to figure out the cause of this error and found that I am putting an array of size 3 where one of size 4 is expected (see Android convert byte array from Camera API to color Mat object openCV).
I wonder what type of image the Android camera delivers. Why do I get an array of size 4?
How do I convert an image received from the Android camera to HSI and preview it on the screen?
The following is the overridden onCameraFrame method:
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat outputFrame = inputFrame.rgba();
    /* Get RGB color from the pixel at [index_row, index_column] */
    int index_row = 0;
    int index_column = 0;
    final double[] mRgb_pixel = outputFrame.get(index_row, index_column);
    /* Show the result */
    runOnUiThread(new Runnable() {
        @Override
        public void run() {
            int r = (int) mRgb_pixel[0];
            int g = (int) mRgb_pixel[1];
            int b = (int) mRgb_pixel[2];
            /* Set RGB color */
            mRred_textview.setText("Red\n" + Double.toString(mRgb_pixel[0]));
            mGreen_textview.setText("Green\n" + Double.toString(mRgb_pixel[1]));
            mBlue_textview.setText("Blue\n" + Double.toString(mRgb_pixel[2]));
            mColor_textview.setBackgroundColor(Color.rgb(r, g, b));
        }
    });
    if (mPreviewType == PreviewType.GB) {
        outputFrame.convertTo(outputFrame, CvType.CV_64FC3);
        return getGBColor(rgb2hsi(outputFrame));
    } else if (mPreviewType == PreviewType.HSI) {
        outputFrame.convertTo(outputFrame, CvType.CV_64FC3);
        return rgb2hsi(outputFrame);
    } else {
        return outputFrame;
    }
}
My MainActivity implements CameraBridgeViewBase.CvCameraViewListener2.
[Edit]
I think the reason it returns an array of size 4 is that the frame is in RGBA format, not RGB.
So how do I convert RGBA to HSI and preview the frame on the screen?

The problem here is that your hsiFrame is a 4-channel image while your hsi array has only 3 values. You need to either add a term for the alpha channel to the hsi array or create hsiFrame with only 3 channels. Either of the following changes should work for you:
1. double[] hsi = {colorH, colorS, colorI, rgb[3]};
2. Mat hsiFrame = new Mat(rgbFrame.size(), CvType.CV_64FC3); (64-bit float depth, to match the frame after convertTo)
Hope this helps.
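A note on why the frame still has four channels after convertTo: Mat.convertTo changes only the element depth, never the channel count, so converting the RGBA frame "to CV_64FC3" actually produces a CV_64FC4 Mat. Putting both fixes together, here is a minimal sketch of an RGBA-aware version of the method (the rgba2hsi name and the radians-to-degrees handling are my additions, not from the original post); note that CameraBridgeViewBase can only render 8-bit frames, so for an on-screen preview you would still need to map the HSI values back into a CV_8UC4 Mat before returning it:
private Mat rgba2hsi(Mat rgbaFrame) {
    // assumes rgbaFrame has already been converted to 64-bit float depth (CV_64FC4)
    Mat hsiFrame = rgbaFrame.clone();
    for (int i = 0; i < rgbaFrame.rows(); ++i) {
        for (int j = 0; j < rgbaFrame.cols(); ++j) {
            double[] rgba = rgbaFrame.get(i, j);
            double r = rgba[0], g = rgba[1], b = rgba[2];
            double min = Math.min(r, Math.min(g, b));
            double intensity = (r + g + b) / 3.0;
            double saturation = (intensity > 0) ? 1.0 - (min / intensity) : 0.0;
            double hue = 0.0;
            double numerator = r - (g / 2.0) - (b / 2.0);
            double denominator = Math.sqrt(r * r + g * g + b * b - r * g - r * b - g * b);
            if (denominator > 0) {
                hue = Math.toDegrees(Math.acos(numerator / denominator));
                if (b > g) hue = 360.0 - hue;
            }
            // carry the alpha channel through so the element count matches the 4 channels
            hsiFrame.put(i, j, hue, saturation, intensity, rgba[3]);
        }
    }
    return hsiFrame;
}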

Related

Detect taken images brightness/darkness level - using Android CameraX

I'm using CameraX to capture images on Android.
I wanted to implement a feature that analyzes the captured image's brightness/darkness level, i.e. whether the image is too dark or too bright.
Is there some elegant way of doing this? Maybe some powerful lightweight library that is meant for this?
My current approach is a piece of code found somewhere on Stack Overflow:
public static boolean isDark(Bitmap bitmap) {
    boolean dark = false;
    float darkThreshold = bitmap.getWidth() * bitmap.getHeight() * 0.45f;
    int darkPixels = 0;
    int[] pixels = new int[bitmap.getWidth() * bitmap.getHeight()];
    bitmap.getPixels(pixels, 0, bitmap.getWidth(), 0, 0, bitmap.getWidth(), bitmap.getHeight());
    for (int pixel : pixels) {
        int r = Color.red(pixel);
        int g = Color.green(pixel);
        int b = Color.blue(pixel);
        // Rec. 601 luma approximation
        double luminance = 0.299 * r + 0.587 * g + 0.114 * b;
        if (luminance < 150) {
            darkPixels++;
        }
    }
    if (darkPixels >= darkThreshold) {
        dark = true;
    }
    return dark;
}
Second approach is to use SensorManager TYPE_LIGHT. Any more ideas/approaches?
The more efficient way is to calculate the luminance without converting the output to a Bitmap at all.
private final ImageAnalysis.Analyzer mAnalyzer = image -> {
    byte[] bytes = new byte[image.getPlanes()[0].getBuffer().remaining()];
    image.getPlanes()[0].getBuffer().get(bytes);
    int total = 0;
    for (byte value : bytes) {
        total += value & 0xFF;
    }
    if (bytes.length != 0) {
        final int luminance = total / bytes.length;
        // luminance is the value you need.
    }
    image.close();
};
Source: CameraX test app source code
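For completeness, a sketch of how such an analyzer might be attached to an ImageAnalysis use case; the 640x480 target resolution and the main-thread executor are illustrative assumptions, not requirements:
// inside an Activity that already obtained a ProcessCameraProvider
ImageAnalysis imageAnalysis = new ImageAnalysis.Builder()
        .setTargetResolution(new Size(640, 480))
        .setBackpressureStrategy(ImageAnalysis.STRATEGY_KEEP_ONLY_LATEST)
        .build();
imageAnalysis.setAnalyzer(ContextCompat.getMainExecutor(this), mAnalyzer);
// then include imageAnalysis among the use cases passed to cameraProvider.bindToLifecycle(...)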

Android ImageReader YUV 420 888 Repeating Data

I am trying to convert an Image received from ImageReader using the Camera 2 API to an OpenCV matrix and display it on screen using CameraBridgeViewBase, more specifically the function deliverAndDrawFrame. The ImageFormat for the reader is YUV_420_888 which, as far as I understand, has a Y plane with a grayscale value for each pixel, and a second plane with interleaved U/V values, one U/V pair for every 2x2 block of Y pixels. However, when I try to display this image, it appears as if the image is repeating and is rotated 90 degrees. The code below is supposed to put the YUV data into an OpenCV matrix (just grayscale for now, not rgba):
/**
 * Takes an {@link Image} in the {@link ImageFormat#YUV_420_888} format and puts it into a provided {@link Mat} in rgba format.
 *
 * @param yuvImage {@link Image} in the {@link ImageFormat#YUV_420_888} format.
 */
public static void yuv420888imageToRgbaMat(final Image yuvImage, final Mat rgbaMat) {
    final Image.Plane
            Yp = yuvImage.getPlanes()[0],
            UandVp = yuvImage.getPlanes()[1];
    final ByteBuffer
            Ybb = Yp.getBuffer(),
            UandVbb = UandVp.getBuffer();
    Ybb.get(mYdata, 0, 480 * 640);
    UandVbb.get(mUandVData, 0, 480 * 640 / 2 - 8);
    for (int i = 0; i < 640 * 480; i++) {
        for (int j = 0; j < 4; j++) {
            mRawRGBAFrameData[i + 640 * 480 * j] = mYdata[i];
        }
        mRawRGBAFrameData[i * 4] = mYdata[i];
        mRawRGBAFrameData[i * 4 + 1] = mYdata[i];
        mRawRGBAFrameData[i * 4 + 2] = mYdata[i];
        mRawRGBAFrameData[i * 4 + 3] = -1;
    }
}
Here is my code for the OpenCV frame:
private class CameraFrame implements CvCameraViewFrame {
    private Mat mRgba;
    @Override
    public Mat gray() {
        return null;
    }
    @Override
    public Mat rgba() {
        mRgbaMat.put(0, 0, mRawRGBAFrameData);
        return mRgba;
    }
    public CameraFrame(final Mat rgba) {
        super();
        mRgba = rgba;
    }
}
The code for receiving and drawing the frame:
private final ImageReader.OnImageAvailableListener mOnImageAvailableListener = new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        final Image yuvImage = reader.acquireLatestImage();
        yuv420888imageToRgbaMat(yuvImage, mRgbaMat);
        deliverAndDrawFrame(mFrame);
        yuvImage.close();
    }
};
And, this is the code for making the image reader:
mRgbaMat = new Mat(mFrameHeight, mFrameWidth, CvType.CV_8UC4);
mFrame = new CameraFrame(mRgbaMat);
mImageReader = ImageReader.newInstance(mFrameWidth, mFrameHeight, ImageFormat.YUV_420_888, 1);
mImageReader.setOnImageAvailableListener(mOnImageAvailableListener, mBackgroundHandler);
AllocateCache();
This is the initialization of the arrays:
protected static byte[] mRawRGBAFrameData = new byte[640*480*4], mYdata = new byte[640*480], mUandVData = new byte[640*480 / 2];
Notes: mFrameWidth is 480 and mFrameHeight is 640. One weird thing is that the height and width for ImageReader and the Image received from it have inverted dimensions.
Here is the image with the code above: https://i.stack.imgur.com/lcdzf.png
Here is the image produced when I instead use the following in yuv420888imageToRgbaMat: https://i.stack.imgur.com/T2MOI.png
for (int i = 0; i < 640 * 480; i++) {
    mRawRGBAFrameData[i] = mYdata[i];
}
We can see that the data is repeating in the Y frame, and for some reason this gives an actual good-looking image.
For anyone having the same problem of trying to use OpenCV with the Camera 2 API, I have come up with a solution.
The first thing I discovered is that there is padding in the ByteBuffer that the ImageReader supplies, which can cause distortion in the output if you do not account for it. Another thing I chose to do was to create my own SurfaceView and draw to it using a Bitmap instead of using CameraBridgeViewBase, and so far it has worked out great. OpenCV has a function Utils.matToBitmap that takes a BGR matrix and converts it to an Android Bitmap, so that has been useful.
I obtain the BGR matrix by putting the information from the first two Image.Planes supplied by the ImageReader into a single-channel OpenCV matrix formatted as YUV 420, and converting it with Imgproc.cvtColor and Imgproc.COLOR_YUV420p2BGR. The important thing to know is that the Y plane of the image has full pixels, but the second UV plane has interleaved pixels that map one to four Y pixels, so the total length of the UV plane is half of the Y plane. See here. Anyways, here is some code:
Initialization of matrices
m_BGRMat = new Mat(Constants.VISION_IMAGE_HEIGHT, Constants.VISION_IMAGE_WIDTH, CvType.CV_8UC3);
m_Yuv420FrameMat = new Mat(Constants.VISION_IMAGE_HEIGHT * 3 / 2, Constants.VISION_IMAGE_WIDTH, CvType.CV_8UC1);
Every frame:
// Convert image to YUV 420 matrix
ImageUtils.imageToMat(image, m_Yuv420FrameMat, m_RawFrameData, m_RawFrameRowData);
// Convert YUV matrix to BGR matrix
Imgproc.cvtColor(m_Yuv420FrameMat, m_BGRMat, Imgproc.COLOR_YUV420p2BGR);
// Flip width and height then mirror vertically
Core.transpose(m_BGRMat, m_BGRMat);
Core.flip(m_BGRMat, m_BGRMat, 0);
// Draw to Surface View
m_PreviewView.drawImageMat(m_BGRMat);
Here is the conversion to YUV 420 matrix:
/**
* Takes an Android {#link Image} in the {#link ImageFormat#YUV_420_888} format and returns an OpenCV {#link Mat}.
*
* #param image {#link Image} in the {#link ImageFormat#YUV_420_888} format
*/
public static void imageToMat(final Image image, final Mat mat, byte[] data, byte[] rowData) {
ByteBuffer buffer;
int rowStride, pixelStride, width = image.getWidth(), height = image.getHeight(), offset = 0;
Image.Plane[] planes = image.getPlanes();
if (data == null || data.length != width * height) data = new byte[width * height * ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888) / 8];
if (rowData == null || rowData.length != planes[0].getRowStride()) rowData = new byte[planes[0].getRowStride()];
for (int i = 0; i < planes.length; i++) {
buffer = planes[i].getBuffer();
rowStride = planes[i].getRowStride();
pixelStride = planes[i].getPixelStride();
int
w = (i == 0) ? width : width / 2,
h = (i == 0) ? height : height / 2;
for (int row = 0; row < h; row++) {
int bytesPerPixel = ImageFormat.getBitsPerPixel(ImageFormat.YUV_420_888) / 8;
if (pixelStride == bytesPerPixel) {
int length = w * bytesPerPixel;
buffer.get(data, offset, length);
// Advance buffer the remainder of the row stride, unless on the last row.
// Otherwise, this will throw an IllegalArgumentException because the buffer
// doesn't include the last padding.
if (h - row != 1)
buffer.position(buffer.position() + rowStride - length);
offset += length;
} else {
// On the last row only read the width of the image minus the pixel stride
// plus one. Otherwise, this will throw a BufferUnderflowException because the
// buffer doesn't include the last padding.
if (h - row == 1)
buffer.get(rowData, 0, width - pixelStride + 1);
else
buffer.get(rowData, 0, rowStride);
for (int col = 0; col < w; col++)
data[offset++] = rowData[col * pixelStride];
}
}
}
mat.put(0, 0, data);
}
And finally, drawing
/**
 * Given a {@link Mat} that represents a BGR image, draw it on the surface canvas.
 * Uses the OpenCV helper function {@link Utils#matToBitmap(Mat, Bitmap)} to create a {@link Bitmap}.
 *
 * @param bgrMat BGR frame {@link Mat}
 */
public void drawImageMat(final Mat bgrMat) {
    if (m_HolderReady) {
        // Create bitmap from BGR matrix
        Utils.matToBitmap(bgrMat, m_Bitmap);
        // Obtain the canvas and draw the bitmap on top of it
        final SurfaceHolder holder = getHolder();
        final Canvas canvas = holder.lockCanvas();
        canvas.drawBitmap(m_Bitmap, null, new Rect(0, 0, m_HolderWidth, m_HolderHeight), null);
        holder.unlockCanvasAndPost(canvas);
    }
}
This way works, but I imagine the best way to do it is to set up an OpenGL rendering context and write some sort of simple shader to display the matrix.

Histogram Matching in Renderscript

In order to align the intensity values of two grayscale Images (as a first step for further processing) I wrote a Java method that:
converts the bitmaps of the two images into two int[] arrays containing the bitmaps' intensities (I just take the red component here, since it's grayscale, i.e. r=g=b):
public static int[] bmpToData(Bitmap bmp) {
    int width = bmp.getWidth();
    int height = bmp.getHeight();
    int anzpixel = width * height;
    int[] pixels = new int[anzpixel];
    int[] data = new int[anzpixel];
    bmp.getPixels(pixels, 0, width, 0, 0, width, height);
    for (int i = 0; i < anzpixel; i++) {
        int p = pixels[i];
        int r = (p & 0xff0000) >> 16;
        //int g = (p & 0xff00) >> 8;
        //int b = p & 0xff;
        data[i] = r;
    }
    return data;
}
aligns the cumulated intensity distribution of Bitmap 2 to that of Bitmap 1:
// aligns the intensity distribution of a moving grayscale picture (given by int[] data2)
// to the intensity distribution of a fixed reference picture (given by int[] data1)
public static int[] histMatch(int[] data1, int[] data2) {
    int anzpixel = data1.length;
    int[] histogram_fixed = new int[256];
    int[] histogram_moving = new int[256];
    int[] cumhist_fixed = new int[256];
    int[] cumhist_moving = new int[256];
    int i = 0;
    int j = 0;
    // read intensities of fixed and moving into histograms
    for (int n = 0; n < anzpixel; n++) {
        histogram_fixed[data1[n]]++;
        histogram_moving[data2[n]]++;
    }
    // calc cumulated distributions
    cumhist_fixed[0] = histogram_fixed[0];
    cumhist_moving[0] = histogram_moving[0];
    for (i = 1; i < 256; ++i) {
        cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
        cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
    }
    // look-up-table lut[]. For each quantile i of the moving picture search the
    // value j of the fixed picture where the quantile is the same as that of moving
    int[] lut = new int[anzpixel];
    j = 0;
    for (i = 0; i < 256; ++i) {
        while (cumhist_fixed[j] < cumhist_moving[i]) {
            j++;
        }
        // check whether the distance to the next-lower intensity is even lower, and if so, take this value
        if ((j != 0) && ((cumhist_fixed[j - 1] - cumhist_fixed[i]) < (cumhist_fixed[j] - cumhist_fixed[i]))) {
            lut[i] = (j - 1);
        } else {
            lut[i] = (j);
        }
    }
    // apply the lut[] to the moving picture
    i = 0;
    for (int n = 0; n < anzpixel; n++) {
        data2[n] = (int) lut[data2[n]];
    }
    return data2;
}
converts the int[] arrays back to Bitmap.
public static Bitmap dataToBitmap(int[] data, int width, int height) {
    int index = 0;
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            index = y * width + x;
            int c = data[index];
            bmp.setPixel(x, y, Color.rgb(c, c, c));
        }
    }
    return bmp;
}
While the core procedure 2) is straightforward and fast, the conversion steps 1) and 3) are rather inefficient. It would be more than cool to do the whole thing in Renderscript. But, honestly, I am completely lost in doing so because of the missing documentation; while there are many impressive examples of what Renderscript COULD do, I don't see a way to benefit from these possibilities (no books, no docs). Any advice is highly appreciated!
As a starting point, use Android Studio to "Import Sample..." and select Basic Render Script. This will give you a working project that we will now modify.
First, let's add more Allocation references to MainActivity. We will use them to communicate image data, histograms and the LUT between Java and Renderscript.
private Allocation mInAllocation;
private Allocation mInAllocation2;
private Allocation[] mOutAllocations;
private Allocation mHistogramAllocation;
private Allocation mHistogramAllocation2;
private Allocation mLUTAllocation;
Then in onCreate() load another image, which you will also need to add to /res/drawables/.
mBitmapIn2 = loadBitmap(R.drawable.cat_480x400);
In createScript() create additional allocations:
mInAllocation2 = Allocation.createFromBitmap(mRS, mBitmapIn2);
mHistogramAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
mHistogramAllocation2 = Allocation.createSized(mRS, Element.U32(mRS), 256);
mLUTAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
And now the main part (in RenderScriptTask):
/*
 * Invoke histogram kernel for both images
 */
mScript.bind_histogram(mHistogramAllocation);
mScript.forEach_compute_histogram(mInAllocation);
mScript.bind_histogram(mHistogramAllocation2);
mScript.forEach_compute_histogram(mInAllocation2);
/*
 * Variables copied verbatim from your code.
 */
int[] histogram_fixed = new int[256];
int[] histogram_moving = new int[256];
int[] cumhist_fixed = new int[256];
int[] cumhist_moving = new int[256];
int i = 0;
int j = 0;
// copy computed histograms to Java side
mHistogramAllocation.copyTo(histogram_fixed);
mHistogramAllocation2.copyTo(histogram_moving);
// your code again...
// calc cumulated distributions
cumhist_fixed[0] = histogram_fixed[0];
cumhist_moving[0] = histogram_moving[0];
for (i = 1; i < 256; ++i) {
    cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
    cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
}
// look-up-table lut[]. For each quantile i of the moving picture search the
// value j of the fixed picture where the quantile is the same as that of moving
int[] lut = new int[256];
j = 0;
for (i = 0; i < 256; ++i) {
    while (cumhist_fixed[j] < cumhist_moving[i]) {
        j++;
    }
    // check whether the distance to the next-lower intensity is even lower, and if so, take this value
    if ((j != 0) && ((cumhist_fixed[j - 1] - cumhist_fixed[i]) < (cumhist_fixed[j] - cumhist_fixed[i]))) {
        lut[i] = (j - 1);
    } else {
        lut[i] = (j);
    }
}
// copy the LUT to the Renderscript side
mLUTAllocation.copyFrom(lut);
mScript.bind_LUT(mLUTAllocation);
// Apply LUT to the destination image
mScript.forEach_apply_histogram(mInAllocation2, mInAllocation2);
/*
 * Copy to bitmap and invalidate image view
 */
//mOutAllocations[index].copyTo(mBitmapsOut[index]);
// copy back to Bitmap in preparation for viewing the results
mInAllocation2.copyTo(mBitmapsOut[index]);
A couple of notes:
In your part of the code I also fixed the LUT allocation size: only 256 entries are needed.
As you can see, I left the computation of the cumulative histograms and the LUT on the Java side. These are rather difficult to parallelize efficiently due to data dependencies and the small scale of the calculations, but considering the latter I don't think it's a problem.
Finally, the Renderscript code. The only non-obvious part is the use of rsAtomicInc() to increase values in histogram bins - this is necessary due to potentially many threads attempting to increase the same bin concurrently.
#pragma version(1)
#pragma rs java_package_name(com.example.android.basicrenderscript)
#pragma rs_fp_relaxed

int32_t *histogram;
int32_t *LUT;

void __attribute__((kernel)) compute_histogram(uchar4 in)
{
    volatile int32_t *addr = &histogram[in.r];
    rsAtomicInc(addr);
}

uchar4 __attribute__((kernel)) apply_histogram(uchar4 in)
{
    uchar val = LUT[in.r];
    uchar4 result;
    result.r = result.g = result.b = val;
    result.a = in.a;
    return(result);
}

Find dominant color in a camera frame in OpenCV Android

I want to get the dominant color in an Android CvCameraViewFrame object. I use the following OpenCV Android code, converted from OpenCV C++ code, to do that. In the following code I loop through all the pixels in my camera frame, find the color of each pixel, and store the counts in a HashMap to find the dominant color at the end of the loop. Looping through each pixel takes about 30 seconds, which is unacceptable for me. Could somebody please review this code and point out how I can find the dominant color in a camera frame?
private String[] colors = {"cBLACK", "cWHITE", "cGREY", "cRED", "cORANGE", "cYELLOW", "cGREEN", "cAQUA", "cBLUE", "cPURPLE", "cPINK", "cRED"};
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    if (mIsColorSelected) {
        Imgproc.cvtColor(mRgba, mRgba, Imgproc.COLOR_BGR2HSV);
        int h = mRgba.height(); // Pixel height
        int w = mRgba.width(); // Pixel width
        int channels = mRgba.channels(); // Bytes per pixel
        int rowSize = (int) mRgba.step1(); // Size of row in bytes, including extra padding
        float initialConfidence = 1.0f;
        Map<String, Integer> tallyColors = new HashMap<String, Integer>();
        byte[] pixelsTotal = new byte[h * rowSize];
        mRgba.get(0, 0, pixelsTotal);
        // This for loop takes about 30 seconds to process for my camera frame
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                // Get the HSV pixel components; mask with 0xFF because Java bytes are
                // signed, and step by the channel count so x addresses whole pixels
                int hVal = pixelsTotal[(y * rowSize) + (x * channels) + 0] & 0xFF; // Hue
                int sVal = pixelsTotal[(y * rowSize) + (x * channels) + 1] & 0xFF; // Saturation
                int vVal = pixelsTotal[(y * rowSize) + (x * channels) + 2] & 0xFF; // Value (Brightness)
                // Determine what type of color the HSV pixel is.
                String ctype = getPixelColorType(hVal, sVal, vVal);
                // Keep count of these colors.
                Integer totalNum = tallyColors.get(ctype);
                tallyColors.put(ctype, totalNum == null ? 1 : totalNum + 1);
            }
        }
        int tallyMaxIndex = 0;
        int tallyMaxCount = -1;
        int pixels = w * h;
        for (int i = 0; i < colors.length; i++) {
            String v = colors[i];
            Integer count = tallyColors.get(v);
            int pixCount = (count == null) ? 0 : count;
            Log.i(TAG, v + " - " + (pixCount * 100 / pixels) + "%, ");
            if (pixCount > tallyMaxCount) {
                tallyMaxCount = pixCount;
                tallyMaxIndex = i;
            }
        }
        float percentage = initialConfidence * (tallyMaxCount * 100 / pixels);
        Log.i(TAG, "Color of currency note: " + colors[tallyMaxIndex] + " (" + percentage + "% confidence).");
    }
    return mRgba;
}
private String getPixelColorType(int H, int S, int V)
{
    String color;
    if (V < 75)
        color = "cBLACK";
    else if (V > 190 && S < 27)
        color = "cWHITE";
    else if (S < 53 && V < 185)
        color = "cGREY";
    else { // Is a color
        if (H < 14)
            color = "cRED";
        else if (H < 25)
            color = "cORANGE";
        else if (H < 34)
            color = "cYELLOW";
        else if (H < 73)
            color = "cGREEN";
        else if (H < 102)
            color = "cAQUA";
        else if (H < 127)
            color = "cBLUE";
        else if (H < 149)
            color = "cPURPLE";
        else if (H < 175)
            color = "cPINK";
        else // full circle
            color = "cRED"; // back to Red
    }
    return color;
}
Thank you very much.
OpenCV has a histogram method which counts all the image's colors. After the histogram is calculated, all you have to do is choose the bin with the biggest count.
Check here for a tutorial (C++): Histogram Calculation.
You might also check this Stack Overflow answer, which shows an example of how to use the Android histogram function Imgproc.calcHist().
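For illustration, a minimal sketch of the calcHist route, assuming `hsv` is a CV_8UC3 Mat already converted to HSV (e.g. with Imgproc.COLOR_RGB2HSV); the dominant hue is then simply the fullest bin:
List<Mat> images = Collections.singletonList(hsv);
Mat hist = new Mat();
Imgproc.calcHist(images, new MatOfInt(0),   // channel 0 = hue
        new Mat(),                          // no mask
        hist,
        new MatOfInt(180),                  // 180 hue bins
        new MatOfFloat(0f, 180f));          // 8-bit hue in OpenCV ranges over 0..180
Core.MinMaxLocResult mm = Core.minMaxLoc(hist);
int dominantHueBin = (int) mm.maxLoc.y;     // hist is a 180x1 column vector
This runs over the whole frame in native code, so it avoids the per-pixel Java loop entirely.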
Also think about resizing your image first; you can then scale the counts back up by the same factor:
Imgproc.resize(largeImage, smallerImage, new Size(), 0.25, 0.25, Imgproc.INTER_AREA);
Or you may check these solutions:
You could find the dominant color using the k-means clustering method; this link will be useful:
https://www.youtube.com/watch?v=f54-x3PckH8
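Along the same lines, a sketch of the k-means idea using OpenCV's Java bindings; the frame name `rgba`, the 64x64 downsample, and K = 3 clusters are assumptions for illustration:
Mat small = new Mat();
Imgproc.resize(rgba, small, new Size(64, 64));          // shrink first; clustering is expensive
Mat rgb = new Mat();
Imgproc.cvtColor(small, rgb, Imgproc.COLOR_RGBA2RGB);   // drop the alpha channel
Mat samples = rgb.reshape(1, rgb.rows() * rgb.cols());  // one row per pixel, 3 columns
samples.convertTo(samples, CvType.CV_32F);              // kmeans requires float samples
Mat labels = new Mat(), centers = new Mat();
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 10, 1.0);
Core.kmeans(samples, 3, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);
// each row of `centers` is a cluster color; the dominant color is the center
// whose label occurs most often in `labels`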

Color detection on Vuforia frames with OpenCV for Android

I need to do color detection (ball tracking) for Augmented Reality. I want to use Qualcomm's Vuforia SDK for AR and OpenCV for image processing. I found a color detection algorithm that works on the desktop (OpenCV, C++) and tried to apply it to FrameMarkers (a Vuforia sample app), but with no success yet.
I get a frame from Vuforia (I can only get RGB565 or GRAYSCALE frames), convert it to an OpenCV Mat object, and apply the same steps as in the desktop solution. But I get an error on the HSV conversion. Below is the code.
// HSV range for orange objects
const int H_MIN = 7;
const int S_MIN = 186;
const int V_MIN = 60;
const int H_MAX = 256;
const int S_MAX = 256;
const int V_MAX = 157;
const bool shouldUseMorphologicalOperators = true;
const int FRAME_WIDTH = 240;
const int FRAME_HEIGHT = 320;
const int MAX_NUM_OBJECTS = 50;
const int MIN_OBJECT_AREA = 20 * 20;
const int MAX_OBJECT_AREA = 320 * 240 / 1.5;

ObjectTracker::ObjectTracker()
{
    x = y = 0;
}

ObjectTracker::~ObjectTracker()
{
}
void ObjectTracker::track(QCAR::Frame frame)
{
    int nImages = frame.getNumImages();
    for (int i = 0; i < nImages; i++)
    {
        const QCAR::Image *image = frame.getImage(i);
        if (image->getFormat() == QCAR::RGB565)
        {
            Mat RGB565 = Mat(image->getHeight(), image->getWidth(), CV_8UC2, (unsigned char *)image->getPixels());
            Mat HSV;
            // I got an error here
            cvtColor(RGB565, HSV, CV_RGB2HSV);
            Mat thresholdedImage;
            inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), thresholdedImage);
            if (shouldUseMorphologicalOperators)
                applyMorphologicalOperator(thresholdedImage);
            trackFilteredObject(x, y, thresholdedImage, RGB565);
            //waitKey(30);
        }
    }
}

void ObjectTracker::applyMorphologicalOperator(Mat &thresholdedImage)
{
    // create structuring elements that will be used to "dilate" and "erode" the image;
    // the element chosen here is a 3px by 3px rectangle
    Mat erodeElement = getStructuringElement(MORPH_RECT, Size(3, 3));
    // dilate with a larger element to make sure the object is nicely visible
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresholdedImage, thresholdedImage, erodeElement);
    erode(thresholdedImage, thresholdedImage, erodeElement);
    dilate(thresholdedImage, thresholdedImage, dilateElement);
    dilate(thresholdedImage, thresholdedImage, dilateElement);
}
void ObjectTracker::trackFilteredObject(int &x, int &y, Mat &thresholdedImage, Mat &cameraFeed)
{
    Mat temp;
    thresholdedImage.copyTo(temp);
    // Two vectors needed for output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarchy;
    // find contours of filtered image using OpenCV findContours function
    findContours(temp, contours, hierarchy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    // use the moments method to find the filtered object
    double refArea = 0;
    bool objectFound = false;
    if (hierarchy.size() > 0)
    {
        int nObjects = hierarchy.size();
        // if the number of objects is greater than MAX_NUM_OBJECTS we have a noisy filter
        if (nObjects < MAX_NUM_OBJECTS)
        {
            for (int index = 0; index >= 0; index = hierarchy[index][0])
            {
                Moments moment = moments((cv::Mat)contours[index]);
                double area = moment.m00;
                // if the area is less than 20 px by 20 px then it is probably just noise
                // if the area is the same as 3/2 of the image size, it's probably just a bad filter
                // we only want the object with the largest area, so we save a reference area each
                // iteration and compare it to the area in the next iteration
                if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA && area > refArea)
                {
                    x = moment.m10 / area;
                    y = moment.m01 / area;
                    objectFound = true;
                    refArea = area;
                }
                else
                    objectFound = false;
            }
            // let the user know you found an object
            if (objectFound == true)
            {
                LOG("Object found");
                highlightObject(x, y, cameraFeed);
            }
        }
        else
        {
            LOG("Too much noise");
        }
    }
    else
        LOG("Object not found");
}

void ObjectTracker::highlightObject(int x, int y, Mat &frame)
{
}
How do I do a proper conversion from RGB565 to the HSV color space?
Convert it to RGB888 first using some code from this SO question.
If you have RGB888, your conversion to HSV should work fine.
EDIT: As mentioned in the comments, in OpenCV you can do it like this:
use cvtColor(BGR565, RGB, CV_BGR5652BGR) to convert from RGB565 to RGB, and then cvtColor(RGB, HSV, CV_RGB2HSV) to convert from RGB to HSV.
EDIT2: It seems that you have to use BGR5652BGR, since there is no RGB5652RGB.
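For reference, here is the suggested two-step conversion written against the OpenCV Java API used elsewhere on this page (the native C++ calls are the same apart from constant naming); `frame565` stands in for the CV_8UC2 Mat wrapped around Vuforia's pixel buffer:
Mat bgr = new Mat();
Mat hsv = new Mat();
Imgproc.cvtColor(frame565, bgr, Imgproc.COLOR_BGR5652BGR); // 16-bit 565 -> 8-bit BGR
Imgproc.cvtColor(bgr, hsv, Imgproc.COLOR_BGR2HSV);         // 8-bit BGR -> HSV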
