OpenCV convert color per pixel - android

Hello, I want to convert the colors in an image. I'm using a per-pixel method, but it seems very slow:
int[] pixels = new int[width * height];
src.getPixels(pixels, 0, width, 0, 0, width, height);
for (int i = 0; i < pixels.length; i++) {
    // Get RGB values as ints, e.g. int r = (pixels[i] >> 16) & 0xff;
    // Set pixel color ('color' is the ARGB value I want)
    pixels[i] = color;
}
// Write the pixels back
src.setPixels(pixels, 0, width, 0, 0, width, height);
My question: is there any way I can do this using OpenCV, i.e. change each pixel to the color I want?

I recommend this excellent article on how to access/modify an OpenCV image buffer. In particular, "the efficient way":
int i, j;
uchar* p;
for (i = 0; i < nRows; ++i)
{
    p = I.ptr<uchar>(i);
    for (j = 0; j < nCols; ++j)
    {
        p[j] = table[p[j]];
    }
}
Or "the iterator-safe method":
MatIterator_<Vec3b> it, end;
for (it = I.begin<Vec3b>(), end = I.end<Vec3b>(); it != end; ++it)
{
    (*it)[0] = table[(*it)[0]];
    (*it)[1] = table[(*it)[1]];
    (*it)[2] = table[(*it)[2]];
}
For further optimization, using cv::LUT() (where possible) can give huge speedups, but it takes more effort to design and code.
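For example, since this question is about Android, applying a 256-entry table in one call with the OpenCV Java bindings might look like this sketch; the identity table here is just a placeholder for whatever mapping you need:
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;

// Build a 1x256 lookup-table Mat; Core.LUT applies it to every pixel in one call
byte[] table = new byte[256];
for (int k = 0; k < 256; k++) {
    table[k] = (byte) k; // placeholder: identity mapping, replace with your own
}
Mat lut = new Mat(1, 256, CvType.CV_8UC1);
lut.put(0, 0, table);

Mat dst = new Mat();
Core.LUT(src, lut, dst); // src must be CV_8U; the table is applied to each channel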

You can access pixels by using:
img.at<Type>(y, x);
So to change an RGB value you can use:
// read color
Vec3b intensity = img.at<Vec3b>(y, x);
// compute new color using intensity.val[0] etc. to access color values
// write new color
img.at<Vec3b>(y, x) = intensity;
@Boyko mentioned an article from OpenCV concerning fast access to the image pixels if you want to iterate over all pixels. The method I would prefer from this article is the iterator method, as it is only slightly slower than direct pointer access but safer to use.
Example Code:
Mat& AssignNewColors(Mat& img)
{
    // accept only 8-bit unsigned char type matrices
    CV_Assert(img.depth() == CV_8U);
    const int channels = img.channels();
    switch (channels)
    {
    // case 1: skipped here
    case 3:
    {
        // Read RGB pixels
        Mat_<Vec3b> _img = img;
        for (int i = 0; i < img.rows; ++i)
            for (int j = 0; j < img.cols; ++j)
            {
                // computeNewColor() stands for whatever per-channel mapping you need
                _img(i, j)[0] = computeNewColor(_img(i, j)[0]);
                _img(i, j)[1] = computeNewColor(_img(i, j)[1]);
                _img(i, j)[2] = computeNewColor(_img(i, j)[2]);
            }
        img = _img;
        break;
    }
    }
    return img;
}

Related

Histogram Matching in Renderscript

In order to align the intensity values of two grayscale images (as a first step for further processing) I wrote a Java method that:
1) converts the bitmaps of the two images into two int[] arrays containing the bitmaps' intensities (I just take the red component here, since it's grayscale, i.e. r=g=b):
public static int[] bmpToData(Bitmap bmp) {
    int width = bmp.getWidth();
    int height = bmp.getHeight();
    int anzpixel = width * height;
    int[] pixels = new int[anzpixel];
    int[] data = new int[anzpixel];
    bmp.getPixels(pixels, 0, width, 0, 0, width, height);
    for (int i = 0; i < anzpixel; i++) {
        int p = pixels[i];
        int r = (p & 0xff0000) >> 16;
        //int g = (p & 0xff00) >> 8;
        //int b = p & 0xff;
        data[i] = r;
    }
    return data;
}
2) aligns the cumulated intensity distribution of Bitmap 2 to that of Bitmap 1:
// aligns the intensity distribution of a moving grayscale picture (given by int[] data2)
// to the intensity distribution of a fixed reference picture (given by int[] data1)
public static int[] histMatch(int[] data1, int[] data2) {
    int anzpixel = data1.length;
    int[] histogram_fixed = new int[256];
    int[] histogram_moving = new int[256];
    int[] cumhist_fixed = new int[256];
    int[] cumhist_moving = new int[256];
    int i = 0;
    int j = 0;
    // read intensities of fixed and moving into the histograms
    for (int n = 0; n < anzpixel; n++) {
        histogram_fixed[data1[n]]++;
        histogram_moving[data2[n]]++;
    }
    // calc cumulated distributions
    cumhist_fixed[0] = histogram_fixed[0];
    cumhist_moving[0] = histogram_moving[0];
    for (i = 1; i < 256; ++i) {
        cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
        cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
    }
    // look-up-table lut[]. For each quantile i of the moving picture search the
    // value j of the fixed picture where the quantile is the same as that of moving
    int[] lut = new int[anzpixel];
    j = 0;
    for (i = 0; i < 256; ++i) {
        while (cumhist_fixed[j] < cumhist_moving[i]) {
            j++;
        }
        // check whether the distance to the next-lower intensity is even lower, and if so, take that value
        if ((j != 0) && ((cumhist_moving[i] - cumhist_fixed[j - 1]) < (cumhist_fixed[j] - cumhist_moving[i]))) {
            lut[i] = j - 1;
        } else {
            lut[i] = j;
        }
    }
    // apply the lut[] to the moving picture
    for (int n = 0; n < anzpixel; n++) {
        data2[n] = lut[data2[n]];
    }
    return data2;
}
3) converts the int[] arrays back to a Bitmap:
public static Bitmap dataToBitmap(int[] data, int width, int height) {
    int index = 0;
    Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    for (int x = 0; x < width; x++) {
        for (int y = 0; y < height; y++) {
            index = y * width + x;
            int c = data[index];
            bmp.setPixel(x, y, Color.rgb(c, c, c));
        }
    }
    return bmp;
}
While the core procedure 2) is straightforward and fast, the conversion steps 1) and 3) are rather inefficient. It would be more than cool to do the whole thing in Renderscript. But, honestly, I am completely lost in doing so because of the missing documentation, and while there are many impressive examples of what Renderscript COULD do, I don't see a way to benefit from these possibilities (no books, no documentation). Any advice is highly appreciated!
As a starting point, use Android Studio to "Import Sample..." and select Basic Render Script. This will give you a working project that we will now modify.
First, let's add more Allocation references to MainActivity. We will use them to communicate image data, histograms and the LUT between Java and Renderscript.
private Allocation mInAllocation;
private Allocation mInAllocation2;
private Allocation[] mOutAllocations;
private Allocation mHistogramAllocation;
private Allocation mHistogramAllocation2;
private Allocation mLUTAllocation;
Then in onCreate() load another image, which you will also need to add to /res/drawable/.
mBitmapIn2 = loadBitmap(R.drawable.cat_480x400);
In createScript() create additional allocations:
mInAllocation2 = Allocation.createFromBitmap(mRS, mBitmapIn2);
mHistogramAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
mHistogramAllocation2 = Allocation.createSized(mRS, Element.U32(mRS), 256);
mLUTAllocation = Allocation.createSized(mRS, Element.U32(mRS), 256);
And now the main part (in RenderScriptTask):
/*
* Invoke histogram kernel for both images
*/
mScript.bind_histogram(mHistogramAllocation);
mScript.forEach_compute_histogram(mInAllocation);
mScript.bind_histogram(mHistogramAllocation2);
mScript.forEach_compute_histogram(mInAllocation2);
/*
* Variables copied verbatim from your code.
*/
int[] histogram_fixed = new int[256];
int[] histogram_moving = new int[256];
int[] cumhist_fixed = new int[256];
int[] cumhist_moving = new int[256];
int i = 0;
int j = 0;
// copy computed histograms to the Java side
mHistogramAllocation.copyTo(histogram_fixed);
mHistogramAllocation2.copyTo(histogram_moving);
// your code again...
// calc cumulated distributions
cumhist_fixed[0] = histogram_fixed[0];
cumhist_moving[0] = histogram_moving[0];
for (i = 1; i < 256; ++i) {
    cumhist_fixed[i] = cumhist_fixed[i - 1] + histogram_fixed[i];
    cumhist_moving[i] = cumhist_moving[i - 1] + histogram_moving[i];
}
// look-up-table lut[]. For each quantile i of the moving picture search the
// value j of the fixed picture where the quantile is the same as that of moving
int[] lut = new int[256];
j = 0;
for (i = 0; i < 256; ++i) {
    while (cumhist_fixed[j] < cumhist_moving[i]) {
        j++;
    }
    // check whether the distance to the next-lower intensity is even lower, and if so, take that value
    if ((j != 0) && ((cumhist_moving[i] - cumhist_fixed[j - 1]) < (cumhist_fixed[j] - cumhist_moving[i]))) {
        lut[i] = j - 1;
    } else {
        lut[i] = j;
    }
}
// copy the LUT to Renderscript side
mLUTAllocation.copyFrom(lut);
mScript.bind_LUT(mLUTAllocation);
// Apply LUT to the destination image
mScript.forEach_apply_histogram(mInAllocation2, mInAllocation2);
/*
* Copy to bitmap and invalidate image view
*/
//mOutAllocations[index].copyTo(mBitmapsOut[index]);
// copy back to Bitmap in preparation for viewing the results
mInAllocation2.copyTo(mBitmapsOut[index]);
A couple of notes:
In your part of the code I also fixed the LUT allocation size: only 256 entries are needed.
As you can see, I left the computation of the cumulative histograms and the LUT on the Java side. These are rather difficult to parallelize efficiently due to data dependencies and the small scale of the calculations, but considering the latter, I don't think it's a problem.
Finally, the Renderscript code. The only non-obvious part is the use of rsAtomicInc() to increase values in histogram bins - this is necessary due to potentially many threads attempting to increase the same bin concurrently.
#pragma version(1)
#pragma rs java_package_name(com.example.android.basicrenderscript)
#pragma rs_fp_relaxed
int32_t *histogram;
int32_t *LUT;
void __attribute__((kernel)) compute_histogram(uchar4 in)
{
    volatile int32_t *addr = &histogram[in.r];
    rsAtomicInc(addr);
}
uchar4 __attribute__((kernel)) apply_histogram(uchar4 in)
{
    uchar val = LUT[in.r];
    uchar4 result;
    result.r = result.g = result.b = val;
    result.a = in.a;
    return result;
}

Android OpenCV color detection in HSV space

I tried to write color (green) detection code for Android (live camera view) in OpenCV. First I used RGB space and it was half okay, but when I switched to HSV space the result is a mess!
This is my code:
Mat A = src;
Mat B = dst;
Imgproc.cvtColor(A, A, Imgproc.COLOR_RGB2HSV, 3);
Size sizeA = A.size();
for (int i = 0; i < sizeA.height; i++)
    for (int j = 0; j < sizeA.width; j++) {
        double[] data = A.get(i, j);
        if (data[0]>=95 && data[0]<=130 & data[1]>=150 && data[1]<=255 & data[2]<=150 && data[2]<=255) {
            data[0] = 120;
            data[1] = 255;
            data[2] = 255;
        }
        else
            data[0] = 100;
            data[1] = 255;
            data[2] = 255;
        B.put(i, j, data);
    }
    Imgproc.cvtColor(B, B, Imgproc.COLOR_RGB2RGBA, 4);
}
What's wrong with this code? And why does this method run so slowly?
(I'm new to Android and OpenCV.)
Thanks
You should probably convert from BGR (not RGB) to HSV, but that depends on your code before this snippet:
Imgproc.cvtColor(A, A, Imgproc.COLOR_BGR2HSV,3);
Check your if statement and always use && (you sometimes use &).
You should convert from HSV to RGB and then to RGBA:
Imgproc.cvtColor(B, B, Imgproc.COLOR_HSV2RGB, 3);
Imgproc.cvtColor(B, B, Imgproc.COLOR_RGB2RGBA, 4);
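Putting the three fixes together, the loop might look like the following sketch. Two assumptions on my part: the first data[2] <= 150 in your condition was meant to be data[2] >= 150, and the else branch was meant to cover all three assignments. The per-pixel get/put pattern is kept from your code, so it will still be slow; Core.inRange() on the whole Mat would be the faster route.
Imgproc.cvtColor(A, A, Imgproc.COLOR_BGR2HSV, 3);
Size sizeA = A.size();
for (int i = 0; i < sizeA.height; i++) {
    for (int j = 0; j < sizeA.width; j++) {
        double[] data = A.get(i, j);
        // && everywhere, and >= on the lower bounds
        if (data[0] >= 95 && data[0] <= 130
                && data[1] >= 150 && data[1] <= 255
                && data[2] >= 150 && data[2] <= 255) {
            data[0] = 120;
            data[1] = 255;
            data[2] = 255;
        } else { // braces make all three assignments part of the else branch
            data[0] = 100;
            data[1] = 255;
            data[2] = 255;
        }
        B.put(i, j, data);
    }
}
Imgproc.cvtColor(B, B, Imgproc.COLOR_HSV2RGB, 3);
Imgproc.cvtColor(B, B, Imgproc.COLOR_RGB2RGBA, 4);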

how to get pixel color using byte array in Android

In my Android project, here is my code:
for (int x = 0; x < targetBitArray.length; x += weight) {
    for (int y = 0; y < targetBitArray[x].length; y += weight) {
        targetBitArray[x][y] = bmp.getPixel(x, y) == mSearchColor;
    }
}
But this code wastes a lot of time.
So I need a way that is faster than Bitmap.getPixel().
I'm trying to get the pixel color using a byte array converted from the bitmap, but I can't get it to work.
How can I replace Bitmap.getPixel()?
Each Bitmap.getPixel method invocation requires a lot of resources, so you need to reduce the number of calls in order to improve the performance of your code.
My suggestion is:
Read the image data row-by-row with Bitmap.getPixels method into a local array
Iterate along your local array
e.g.
int[] rowData = new int[bitmapWidth];
for (int row = 0; row < bitmapHeight; row++) {
    // Load one row of pixels
    bitmap.getPixels(rowData, 0, bitmapWidth, 0, row, bitmapWidth, 1);
    for (int column = 0; column < bitmapWidth; column++) {
        targetBitArray[column][row] = rowData[column] == mSearchColor;
    }
}
This will be a great improvement to the performance of your code.
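If memory allows, you can go one step further and fetch the whole bitmap with a single call; a minimal sketch, assuming the same targetBitArray and mSearchColor as in your code:
int[] allPixels = new int[bitmapWidth * bitmapHeight];
// One native call instead of one per row
bitmap.getPixels(allPixels, 0, bitmapWidth, 0, 0, bitmapWidth, bitmapHeight);
for (int row = 0; row < bitmapHeight; row++) {
    int rowOffset = row * bitmapWidth;
    for (int column = 0; column < bitmapWidth; column++) {
        targetBitArray[column][row] = allPixels[rowOffset + column] == mSearchColor;
    }
}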

Find dominant color in a camera frame in OpenCV Android

I want to get the dominant color in an Android CvCameraViewFrame object. I use the following OpenCV Android code to do that; it was converted from OpenCV C++ code. In the code below I loop through all the pixels in my camera frame, determine the color of each pixel, and store the counts in a HashMap to find the dominant color at the end of the loop. Looping through each pixel takes about 30 seconds, which is unacceptable for me. Could somebody please review this code and point out how I can find the dominant color in a camera frame?
private String[] colors = {"cBLACK", "cWHITE", "cGREY", "cRED", "cORANGE", "cYELLOW", "cGREEN", "cAQUA", "cBLUE", "cPURPLE", "cPINK", "cRED"};

public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    if (mIsColorSelected) {
        Imgproc.cvtColor(mRgba, mRgba, Imgproc.COLOR_BGR2HSV);
        int h = mRgba.height(); // Pixel height
        int w = mRgba.width();  // Pixel width
        int rowSize = (int) mRgba.step1(); // Size of row in bytes, including extra padding
        float initialConfidence = 1.0f;
        Map<String, Integer> tallyColors = new HashMap<String, Integer>();
        byte[] pixelsTotal = new byte[h * rowSize];
        mRgba.get(0, 0, pixelsTotal);
        // This for loop takes about 30 seconds to process for my camera frame
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                // Get the HSV pixel components
                int hVal = (int) pixelsTotal[(y * rowSize) + x + 0]; // Hue
                int sVal = (int) pixelsTotal[(y * rowSize) + x + 1]; // Saturation
                int vVal = (int) pixelsTotal[(y * rowSize) + x + 2]; // Value (Brightness)
                // Determine what type of color the HSV pixel is.
                String ctype = getPixelColorType(hVal, sVal, vVal);
                // Keep count of these colors.
                int totalNum = 0;
                try {
                    totalNum = tallyColors.get(ctype);
                } catch (Exception ex) {
                    totalNum = 0;
                }
                totalNum++;
                tallyColors.put(ctype, totalNum);
            }
        }
        int tallyMaxIndex = 0;
        int tallyMaxCount = -1;
        int pixels = w * h;
        for (int i = 0; i < colors.length; i++) {
            String v = colors[i];
            int pixCount;
            try {
                pixCount = tallyColors.get(v);
            } catch (Exception e) {
                pixCount = 0;
            }
            Log.i(TAG, v + " - " + (pixCount * 100 / pixels) + "%, ");
            if (pixCount > tallyMaxCount) {
                tallyMaxCount = pixCount;
                tallyMaxIndex = i;
            }
        }
        float percentage = initialConfidence * (tallyMaxCount * 100 / pixels);
        Log.i(TAG, "Color of currency note: " + colors[tallyMaxIndex] + " (" + percentage + "% confidence).");
    }
    return mRgba;
}
private String getPixelColorType(int H, int S, int V) {
    String color;
    if (V < 75)
        color = "cBLACK";
    else if (V > 190 && S < 27)
        color = "cWHITE";
    else if (S < 53 && V < 185)
        color = "cGREY";
    else { // Is a color
        if (H < 14)
            color = "cRED";
        else if (H < 25)
            color = "cORANGE";
        else if (H < 34)
            color = "cYELLOW";
        else if (H < 73)
            color = "cGREEN";
        else if (H < 102)
            color = "cAQUA";
        else if (H < 127)
            color = "cBLUE";
        else if (H < 149)
            color = "cPURPLE";
        else if (H < 175)
            color = "cPINK";
        else // full circle
            color = "cRED"; // back to Red
    }
    return color;
}
Thank you very much.
OpenCV has a histogram function which counts all the colors in an image. After the histogram is calculated, all you have to do is choose the bin with the biggest count...
Check here for a tutorial (C++): Histogram Calculation.
You might also check this Stack Overflow answer, which shows an example of how to use Android's histogram function, Imgproc.calcHist().
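As a rough sketch of that approach with the Java bindings (my assumptions: hsv is a CV_8UC3 HSV frame, and 30 hue bins is an arbitrary choice), looking only at the hue channel:
List<Mat> channels = new ArrayList<>();
Core.split(hsv, channels); // channels.get(0) is the hue plane

Mat hist = new Mat();
MatOfInt histSize = new MatOfInt(30);         // 30 hue bins (arbitrary)
MatOfFloat ranges = new MatOfFloat(0f, 180f); // hue range in 8-bit OpenCV is 0..179
Imgproc.calcHist(Arrays.asList(channels.get(0)), new MatOfInt(0),
        new Mat(), hist, histSize, ranges);

// The bin with the biggest count holds the dominant hue
Core.MinMaxLocResult mm = Core.minMaxLoc(hist);
double dominantHue = (mm.maxLoc.y + 0.5) * (180.0 / 30); // center of the winning bin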
Think about resizing your images first; then you may multiply the results by the same scale:
Imgproc.resize(largeImage, smallerImage, new Size(), 0.25, 0.25, Imgproc.INTER_CUBIC); // e.g. quarter size
Or you may check these solutions:
You could find the dominant color using the k-means clustering method; a sketch follows below.
This link will be useful:
https://www.youtube.com/watch?v=f54-x3PckH8
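For illustration, a minimal k-means sketch with the OpenCV Java bindings; everything here is an assumption on my part (mRgba as the RGBA frame, k = 4 clusters), not code from the video:
// Flatten the frame into an N x 3 matrix of float RGB samples, as Core.kmeans expects
Mat rgb = new Mat();
Imgproc.cvtColor(mRgba, rgb, Imgproc.COLOR_RGBA2RGB);
Mat samples = rgb.reshape(1, rgb.rows() * rgb.cols());
Mat samples32f = new Mat();
samples.convertTo(samples32f, CvType.CV_32F);

Mat labels = new Mat();
Mat centers = new Mat();
TermCriteria criteria = new TermCriteria(TermCriteria.COUNT + TermCriteria.EPS, 10, 1.0);
Core.kmeans(samples32f, 4, labels, criteria, 3, Core.KMEANS_PP_CENTERS, centers);

// Count how many samples landed in each cluster; the biggest cluster's center
// (a row of 'centers') is the dominant color
int[] counts = new int[4];
int[] labelBuf = new int[(int) labels.total()];
labels.get(0, 0, labelBuf); // bulk copy: much faster than per-element get()
for (int label : labelBuf) {
    counts[label]++;
}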

Color detection on Vuforia frames with OpenCV for Android

I need to do color detection (ball tracking) for augmented reality. I want to use Qualcomm's Vuforia SDK for AR and OpenCV for image processing. I found a color detection algorithm that works on the desktop (OpenCV, C++) and tried to apply it to FrameMarkers (a Vuforia sample) but with no success yet.
I get a frame from Vuforia (I can only get RGB565 or GRAYSCALE frames), convert it to an OpenCV Mat object, and apply the same steps as in the desktop solution. But I get an error on the HSV conversion. Below is the code.
// HSV range for orange objects
const int H_MIN = 7;
const int S_MIN = 186;
const int V_MIN = 60;
const int H_MAX = 256;
const int S_MAX = 256;
const int V_MAX = 157;
const bool shouldUseMorphologicalOperators = true;
const int FRAME_WIDTH = 240;
const int FRAME_HEIGHT = 320;
const int MAX_NUM_OBJECTS = 50;
const int MIN_OBJECT_AREA = 20 * 20;
const int MAX_OBJECT_AREA = 320 * 240 / 1.5;

ObjectTracker::ObjectTracker()
{
    x = y = 0;
}

ObjectTracker::~ObjectTracker()
{
}

void ObjectTracker::track(QCAR::Frame frame)
{
    int nImages = frame.getNumImages();
    for (int i = 0; i < nImages; i++)
    {
        const QCAR::Image *image = frame.getImage(i);
        if (image->getFormat() == QCAR::RGB565)
        {
            Mat RGB565 = Mat(image->getHeight(), image->getWidth(), CV_8UC2, (unsigned char *)image->getPixels());
            Mat HSV;
            // I got an error here
            cvtColor(RGB565, HSV, CV_RGB2HSV);
            Mat thresholdedImage;
            inRange(HSV, Scalar(H_MIN, S_MIN, V_MIN), Scalar(H_MAX, S_MAX, V_MAX), thresholdedImage);
            if (shouldUseMorphologicalOperators)
                applyMorphologicalOperator(thresholdedImage);
            trackFilteredObject(x, y, thresholdedImage, RGB565);
            //waitKey(30);
        }
    }
}
void ObjectTracker::applyMorphologicalOperator(Mat &thresholdedImage)
{
    // create structuring elements that will be used to "dilate" and "erode" the image;
    // the erode element chosen here is a 3px by 3px rectangle
    Mat erodeElement = getStructuringElement(MORPH_RECT, Size(3, 3));
    // dilate with a larger element to make sure the object is nicely visible
    Mat dilateElement = getStructuringElement(MORPH_RECT, Size(8, 8));
    erode(thresholdedImage, thresholdedImage, erodeElement);
    erode(thresholdedImage, thresholdedImage, erodeElement);
    dilate(thresholdedImage, thresholdedImage, dilateElement);
    dilate(thresholdedImage, thresholdedImage, dilateElement);
}
void ObjectTracker::trackFilteredObject(int &x, int &y, Mat &thresholdedImage, Mat &cameraFeed)
{
    Mat temp;
    thresholdedImage.copyTo(temp);
    // two vectors needed for the output of findContours
    vector< vector<Point> > contours;
    vector<Vec4i> hierarcy;
    // find contours of the filtered image using the OpenCV findContours function
    findContours(temp, contours, hierarcy, CV_RETR_CCOMP, CV_CHAIN_APPROX_SIMPLE);
    // use the moments method to find the filtered object
    double refArea = 0;
    bool objectFound = false;
    if (hierarcy.size() > 0)
    {
        int nObjects = hierarcy.size();
        // if the number of objects is greater than MAX_NUM_OBJECTS we have a noisy filter
        if (nObjects < MAX_NUM_OBJECTS)
        {
            for (int index = 0; index >= 0; index = hierarcy[index][0])
            {
                Moments moment = moments((cv::Mat)contours[index]);
                double area = moment.m00;
                // if the area is less than 20 px by 20 px then it is probably just noise
                // if the area is the same as 3/2 of the image size, it's probably just a bad filter
                // we only want the object with the largest area, so we save a reference area each
                // iteration and compare it to the area in the next iteration
                if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA && area > refArea)
                {
                    x = moment.m10 / area;
                    y = moment.m01 / area;
                    objectFound = true;
                    refArea = area;
                }
                else
                    objectFound = false;
            }
            // let the user know you found an object
            if (objectFound == true)
            {
                LOG("Object found");
                highlightObject(x, y, cameraFeed);
            }
        }
        else
        {
            LOG("Too much noise");
        }
    }
    else
        LOG("Object not found");
}
void ObjectTracker::highlightObject(int x,int y,Mat &frame)
{
}
How do I do a proper conversion from RGB565 to the HSV color space?
Convert it to RGB888 first, using some code from this SO question.
If you have RGB888, your conversion to HSV should work fine.
EDIT: As mentioned in the comments, in OpenCV you can do it like this:
use cvtColor(BGR565, RGB, CV_BGR5652BGR) to convert from RGB565 to RGB, and then cvtColor(RGB, HSV, CV_RGB2HSV) to convert from RGB to HSV.
EDIT2: It seems that you have to use CV_BGR5652BGR, since there is no CV_RGB5652RGB.
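For reference, a minimal sketch of the same two-step chain with the OpenCV Java bindings; width, height, and rgb565Bytes stand in for the frame data you get from Vuforia:
Mat frame565 = new Mat(height, width, CvType.CV_8UC2);
frame565.put(0, 0, rgb565Bytes); // rgb565Bytes: the raw RGB565 buffer from the frame

Mat bgr = new Mat();
Mat hsv = new Mat();
Imgproc.cvtColor(frame565, bgr, Imgproc.COLOR_BGR5652BGR); // 16-bit 565 -> 8-bit BGR
Imgproc.cvtColor(bgr, hsv, Imgproc.COLOR_BGR2HSV);         // then BGR -> HSV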
