I am trying to write an Android app that performs histogram matching of color images using OpenCV 3.1. I found a code example in C++ and converted it to Java.
I tried to match each RGB channel separately, but it did not give me the desired results. So now I'm converting the images to YUV color space and then matching the Y channels. What I hope to achieve is brightness matching, so if the source is brighter than the target I will get a darker image.
So far it seems that the Y histogram of the final (output) image is somewhat close to the Y histogram of the target image, but the actual output doesn't look like the target.
Here is the relevant code:
private Mat calculateLUT(Mat in_cdf_mat, Mat dst_cdf_mat) {
    int last = 0;
    double epsilon = Double.parseDouble(epsilonTextView.getText().toString()); // epsilon set to 0.01
    Mat M = new Mat(256, 1, CvType.CV_8UC1);
    // For each input CDF value F1(j), find the first target CDF value F2(k) that
    // matches it within epsilon (or exceeds it); that k becomes the LUT entry for j.
    for (int j = 0; j < in_cdf_mat.rows(); j++) {
        double F1j = in_cdf_mat.get(j, 0)[0];
        for (int k = last; k < dst_cdf_mat.rows(); k++) {
            double F2k = dst_cdf_mat.get(k, 0)[0];
            if (Math.abs(F2k - F1j) < epsilon || F2k > F1j) {
                M.put(j, 0, new double[]{k});
                last = k;
                break;
            }
        }
    }
    return M;
}
private void calculateCDF(Mat channel, Mat cdf) {
    // channel holds the histogram: the index represents the pixel value
    // and the entry is the number of pixels of that value in the image.
    // Seed the first entry; without this, cdf(0,0) keeps whatever value the Mat held before.
    cdf.put(0, 0, channel.get(0, 0)[0]);
    for (int i = 1; i < 256; i++) {
        double[] data = new double[1];
        data[0] = cdf.get(i - 1, 0)[0] + channel.get(i, 0)[0];
        cdf.put(i, 0, data);
    }
}
private void calcHistogram(String imgPath, Mat y_hist, Mat y_cdf) {
    Mat ycrcb = new Mat();
    Mat image = Imgcodecs.imread(imgPath);
    // Imgcodecs.imread returns BGR, so use the BGR conversion constant.
    Imgproc.cvtColor(image, ycrcb, Imgproc.COLOR_BGR2YCrCb);
    image.release();
    List<Mat> ycrcbChannels = new ArrayList<>();
    Core.split(ycrcb, ycrcbChannels);
    List<Mat> yList = new ArrayList<>();
    yList.add(ycrcbChannels.get(0));
    MatOfInt histSize = new MatOfInt(256);
    MatOfFloat histRange = new MatOfFloat(0f, 256f);
    Imgproc.calcHist(yList, new MatOfInt(0), new Mat(), y_hist, histSize, histRange, false);
    // Scale histogram and CDF into [3, 255] so both fit the drawing canvas.
    Core.normalize(y_hist, y_hist, 3, 255, Core.NORM_MINMAX);
    calculateCDF(y_hist, y_cdf);
    Core.normalize(y_cdf, y_cdf, 3, 255, Core.NORM_MINMAX);
}
private void transformLight(Mat inputImage, Mat outputImage, Mat ylut) {
    Mat imageYCrCb = new Mat();
    // The input image was loaded with imread, which returns BGR.
    Imgproc.cvtColor(inputImage, imageYCrCb, Imgproc.COLOR_BGR2YCrCb);
    Mat y_channel = new Mat();
    Core.extractChannel(imageYCrCb, y_channel, 0);
    Mat cr_channel = new Mat();
    Core.extractChannel(imageYCrCb, cr_channel, 1);
    Mat cb_channel = new Mat();
    Core.extractChannel(imageYCrCb, cb_channel, 2);
    // Remap only the luma channel through the matching LUT.
    Core.LUT(y_channel, ylut, y_channel);
    ArrayList<Mat> ycrcbDest = new ArrayList<>();
    ycrcbDest.add(y_channel);
    ycrcbDest.add(cr_channel);
    ycrcbDest.add(cb_channel);
    Core.merge(ycrcbDest, outputImage);
    Imgproc.cvtColor(outputImage, outputImage, Imgproc.COLOR_YCrCb2BGR);
}
private static void drawLine(Mat mat, int i, long bin_w, int hist_h, Mat histImage, Scalar color) {
    // bin_w set to 1
    Point p0 = new Point(bin_w * (i - 1), hist_h - Math.round(mat.get(i - 1, 0)[0]));
    Point p1 = new Point(bin_w * i, hist_h - Math.round(mat.get(i, 0)[0]));
    Imgproc.line(histImage, p0, p1, color, 5, 8, 0);
}

private void drawHistogram(Mat histImage, Mat graph, Scalar color) {
    for (int i = 1; i < 256; i++) {
        drawLine(graph, i, bin_w, histImage.rows(), histImage, color);
    }
}

private void histNCDFtoFile(String filename, Mat hist, Mat cdf, Scalar histColor, Scalar cdfColor) {
    Mat histImage = new Mat(256, 256, CvType.CV_8UC3);
    drawHistogram(histImage, hist, histColor);
    drawHistogram(histImage, cdf, cdfColor);
    saveImage(filename, histImage);
}
private Mat matchHistograms(String input, String target) {
    Mat input_y_hist = new Mat();
    Mat target_y_hist = new Mat();
    calcHistogram(input, input_y_hist, input_y_cdf_mat);
    histNCDFtoFile("inputHistNCDF.jpg", input_y_hist, input_y_cdf_mat, inputHistColor, inputCDFColor);
    calcHistogram(target, target_y_hist, target_y_cdf_mat);
    histNCDFtoFile("targetHistNCDF.jpg", target_y_hist, target_y_cdf_mat, targetHistColor, targetCDFColor);
    Mat ylut = calculateLUT(input_y_cdf_mat, target_y_cdf_mat);
    Mat dst = new Mat(); // this Mat will hold the transformed image
    Mat image = Imgcodecs.imread(input);
    transformLight(image, dst, ylut);
    return dst;
}
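For context, a minimal usage sketch (the file paths here are hypothetical, and saveImage is the same helper used by histNCDFtoFile above):

Mat matched = matchHistograms("/sdcard/DCIM/input.jpg", "/sdcard/DCIM/target.jpg"); // hypothetical paths
saveImage("matchedResult.jpg", matched);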
Here is an example image from Pixabay that I want to transform:
And this is the image that I use as a target:
And this is the result:
The CDF of the result and the target is here:
The light green curve is the target CDF and the light blue curve is the result CDF.
I'm new to OpenCV on Android and am trying to do a perspective transform, but I don't know how to use the getPerspectiveTransform() and warpPerspective() functions. I can detect a rectangle in an image, but I don't know how to warp it.
Here is the rectangle-detection code:
Mat tempMat = new Mat();
Mat src = new Mat();
Utils.bitmapToMat(image, tempMat);
Imgproc.cvtColor(tempMat, src, Imgproc.COLOR_BGR2RGB);
Mat blurred = src.clone();
Imgproc.medianBlur(src, blurred, 9);
Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
List<Mat> blurredChannel = new ArrayList<Mat>();
blurredChannel.add(blurred);
List<Mat> gray0Channel = new ArrayList<Mat>();
gray0Channel.add(gray0);
MatOfPoint2f approxCurve = new MatOfPoint2f();
double maxArea = 0;
int maxId = -1;
// Look for the largest 4-point contour across each color channel.
for (int c = 0; c < 3; c++) {
    int ch[] = { c, 0 };
    Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
    int thresholdLevel = 1;
    for (int t = 0; t < thresholdLevel; t++) {
        if (t == 0) {
            Imgproc.Canny(gray0, gray, 50, 50, 3, true); // true?
            Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1?
        } else {
            Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
                    Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
                    Imgproc.THRESH_BINARY,
                    (src.width() + src.height()) / 200, t);
        }
        Imgproc.findContours(gray, contours, new Mat(),
                Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        for (MatOfPoint contour : contours) {
            MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
            double area = Imgproc.contourArea(contour);
            approxCurve = new MatOfPoint2f();
            Imgproc.approxPolyDP(temp, approxCurve,
                    Imgproc.arcLength(temp, true) * 0.02, true);
            if (approxCurve.total() == 4 && area >= maxArea) {
                double maxCosine = 0;
                List<Point> curves = approxCurve.toList();
                // angle() returns the cosine of the corner angle, as in OpenCV's squares.cpp sample.
                for (int j = 2; j < 5; j++) {
                    double cosine = Math.abs(angle(curves.get(j % 4),
                            curves.get(j - 2), curves.get(j - 1)));
                    maxCosine = Math.max(maxCosine, cosine);
                }
                if (maxCosine < 0.45) {
                    maxArea = area;
                    maxId = contours.indexOf(contour);
                }
            }
        }
    }
}
I draw the rectangle with this statement:
if (maxId >= 0) {
    Rect rect = Imgproc.boundingRect(contours.get(maxId));
    Imgproc.rectangle(src, rect.tl(), rect.br(), new Scalar(255, 0, 0, .8), 4);
}
After that I convert the Mat to a bitmap and show it in an ImageView.
Here is the screenshot
So my problem is warping: how can I warp the rectangle and rotate it?
And if it is possible, how can I improve the rectangle detection? Any hints?
(OpenCV Android SDK version: 3.4.1, Android Studio version: 3.0.1)
If you are looking to warp the detected contour into a rectangle:
Get the contour of the rectangle
Find the convex hull of the contour
Using approxPolyDP, reduce the convex hull points into 4 points
Fit lines to consecutive points (e.g., if pts is the array, the lines are fit as l1 = lineBetween(pts[0], pts[1]), l2 = lineBetween(pts[1], pts[2]), l3 = lineBetween(pts[2], pts[3]), l4 = lineBetween(pts[3], pts[0]))
Find the intersections between these lines; you'll end up with four corner points (a sketch of this step follows the code below)
Order the points in clockwise order (inputCorners = TopLeft, TopRight, BottomRight, BottomLeft)
Create an output image with the needed resolution and list its corner points in the same clockwise order, in (x, y) coordinates: ((0, 0), (cols, 0), (cols, rows), (0, rows))
Find the homography using the function
Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
Using the output homography matrix, warp the input image with the function
Imgproc.warpPerspective(image, outputMat, homography, new Size(image.cols(), image.rows()));
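For the line-intersection step mentioned above, here is a minimal Java sketch; lineIntersection is a hypothetical helper written for illustration (it is not an OpenCV function), treating each line as a point plus a direction vector:

// Hypothetical helper (not an OpenCV API): intersect line A (through a1, a2)
// with line B (through b1, b2) using the parametric form P = a1 + t * (a2 - a1).
static Point lineIntersection(Point a1, Point a2, Point b1, Point b2) {
    double d1x = a2.x - a1.x, d1y = a2.y - a1.y; // direction of line A
    double d2x = b2.x - b1.x, d2y = b2.y - b1.y; // direction of line B
    double denom = d1x * d2y - d1y * d2x;        // 2D cross product of the directions
    if (Math.abs(denom) < 1e-9) {
        return null; // the lines are (nearly) parallel, no single intersection
    }
    double t = ((b1.x - a1.x) * d2y - (b1.y - a1.y) * d2x) / denom;
    return new Point(a1.x + t * d1x, a1.y + t * d1y);
}

Applying it to the four fitted lines in order gives the four corner points to pass as inputCorners.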
This is my Kotlin extension version; you can use it in your projects.
fun Bitmap.perspectiveTransform(srcPoints: List<org.opencv.core.Point>): Bitmap {
    val dstWidth = max(
        srcPoints[0].distanceFrom(srcPoints[1]),
        srcPoints[2].distanceFrom(srcPoints[3])
    )
    val dstHeight = max(
        srcPoints[0].distanceFrom(srcPoints[2]),
        srcPoints[1].distanceFrom(srcPoints[3])
    )
    val dstPoints: List<org.opencv.core.Point> = listOf(
        org.opencv.core.Point(0.0, 0.0),
        org.opencv.core.Point(dstWidth, 0.0),
        org.opencv.core.Point(0.0, dstHeight),
        org.opencv.core.Point(dstWidth, dstHeight)
    )
    return try {
        val srcMat = Converters.vector_Point2d_to_Mat(srcPoints)
        val dstMat = Converters.vector_Point2d_to_Mat(dstPoints)
        val perspectiveTransformation = Imgproc.getPerspectiveTransform(srcMat, dstMat)
        val inputMat = Mat(this.height, this.width, CvType.CV_8UC1)
        Utils.bitmapToMat(this, inputMat)
        val outPutMat = Mat(dstHeight.toInt(), dstWidth.toInt(), CvType.CV_8UC1)
        Imgproc.warpPerspective(
            inputMat,
            outPutMat,
            perspectiveTransformation,
            Size(dstWidth, dstHeight)
        )
        val outPut = Bitmap.createBitmap(
            dstWidth.toInt(),
            dstHeight.toInt(), Bitmap.Config.RGB_565
        )
        //Imgproc.cvtColor(outPutMat, outPutMat, Imgproc.COLOR_GRAY2BGR)
        Utils.matToBitmap(outPutMat, outPut)
        outPut
    } catch (e: Exception) {
        e.printStackTrace()
        this
    }
}
To use distanceFrom, I wrote another extension function:
fun org.opencv.core.Point.distanceFrom(srcPoint: org.opencv.core.Point): Double {
    val w1 = this.x - srcPoint.x
    val h1 = this.y - srcPoint.y
    val distance = w1.pow(2) + h1.pow(2)
    return sqrt(distance)
}
Also, in this answer the correct srcPoints indices are:
0: topLeft
1: topRight
2: bottomLeft
3: bottomRight
Good luck
I am new to OpenCV and am trying to make an OMR scanner from scratch. I have a snippet that is supposed to detect filled circles, but it is doing quite the opposite and detecting unfilled circles. Please show me my mistake in the code.
public void showFilledCircles(Bitmap paramView) {
    paramView = BitmapFactory.decodeFile(filename);
    Mat rgbaMat = new Mat();
    Utils.bitmapToMat(paramView, rgbaMat);
    Mat blurred = new Mat();
    Imgproc.GaussianBlur(rgbaMat, blurred, new Size(3.0D, 3.0D), 3.0D, 2.5D);
    Mat gray = new Mat();
    Imgproc.cvtColor(blurred, gray, Imgproc.COLOR_RGB2GRAY); // 7 == COLOR_RGB2GRAY
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Mat edges = new Mat();
    Imgproc.Canny(gray, edges, 140.0D, 255.0D);
    Imgproc.findContours(edges, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // Note: gray is single-channel, so only the first value of the Scalar is used here.
    for (int i = 0; i < contours.size(); i++) {
        Imgproc.drawContours(gray, contours, i, new Scalar(0.0D, 0.0D, 255.0D), 2);
    }
    Mat mask = new Mat();
    Core.inRange(gray, new Scalar(70.0D, 70.0D, 70.0D), new Scalar(255.0D, 255.0D, 255.0D), mask);
    Mat circles = new Mat();
    Imgproc.HoughCircles(mask, circles, Imgproc.HOUGH_GRADIENT, 1.0D, 20.0D, 40.0D, 10.0D, 6, 18);
    for (int i = 0; i < circles.cols(); i++) {
        double[] c = circles.get(0, i);
        if (c == null) continue;
        Point center = new Point(Math.round(c[0]), Math.round(c[1]));
        int radius = (int) Math.round(c[2]);
        Log.i("circle points ---------", center + " radius " + radius);
        Imgproc.circle(rgbaMat, center, 1, new Scalar(0.0D, 0.0D, 255.0D), 5);
        Imgproc.circle(rgbaMat, center, radius, new Scalar(255.0D, 0.0D, 0.0D), 5);
    }
    Utils.matToBitmap(rgbaMat, paramView);
    this.imageView.setImageBitmap(paramView);
    //this.imageView.setRotation(90.0F);
}
output image
For more precise detection of filled circles:
Step 1: detect contours on the image.
Step 2: create a rectangle around each detected contour.
Step 3: depending on the rectangle's height and width, choose only those contours you want (a sketch of these steps follows below).
For filled-circle detection, refer to this question:
Detect filled circle using opencv4Android
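As an illustration of these three steps (a sketch only, not the linked answer's code): assuming binary is a single-channel Mat thresholded so that pencil marks are white, the hypothetical helper below keeps roughly square contours within an expected bubble-size range and then checks how much of each bounding box is actually filled. The size limits and the 0.6 fill-ratio threshold are assumed values to tune for your sheets:

// Sketch: find filled bubbles in a binarized OMR sheet (marks assumed white on black).
static List<Rect> findFilledBubbles(Mat binary, int minSize, int maxSize) {
    List<MatOfPoint> contours = new ArrayList<>();
    Imgproc.findContours(binary, contours, new Mat(),
            Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);           // step 1
    List<Rect> filled = new ArrayList<>();
    for (MatOfPoint contour : contours) {
        Rect box = Imgproc.boundingRect(contour);                      // step 2
        boolean sizeOk = box.width >= minSize && box.width <= maxSize  // step 3
                && box.height >= minSize && box.height <= maxSize;
        boolean squarish = Math.abs(box.width - box.height)
                <= 0.2 * Math.max(box.width, box.height);
        if (!sizeOk || !squarish) continue;
        // A filled circle covers most of its bounding box, while an empty ring
        // contributes only its outline, so the white-pixel ratio separates them.
        double fillRatio = Core.countNonZero(binary.submat(box))
                / (double) (box.width * box.height);
        if (fillRatio > 0.6) filled.add(box);
    }
    return filled;
}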
I want to implement K-means clustering to segment an image based on color intensity, but I do not know how to get the segmented image and ROI after applying the Core.kmeans function. I followed the steps in the question here, but there is no answer for how to proceed from that point. Any help would be appreciated.
Thanks in advance.
This is the code I am using, based on different sources, and actually it is not working:
// Hypothetical method signature added for completeness (the original snippet starts mid-method).
private List<Mat> segmentByKmeans(Mat imgMat) {
    // convert to Lab with three channels
    Mat imgLab = new Mat();
    Imgproc.cvtColor(imgMat, imgLab, Imgproc.COLOR_RGB2Lab, 3);
    // separate channels (split the Lab image, not the RGB input)
    List<Mat> lab_planes = new ArrayList<Mat>(3);
    Core.split(imgLab, lab_planes);
    // use only the a and b channels of the Lab color space: zero out the last plane
    Mat channel = Mat.zeros(imgLab.rows(), imgLab.cols(), CvType.CV_8UC1);
    lab_planes.set(2, channel);
    Core.merge(lab_planes, imgLab);
    // one row per pixel, one column per channel
    Mat samples = imgLab.reshape(1, imgLab.cols() * imgLab.rows());
    Mat samples32f = new Mat();
    samples.convertTo(samples32f, CvType.CV_32F, 1.0 / 255.0);
    Mat labels = new Mat();
    TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 1);
    Mat centers = new Mat();
    int nColors = 3;  // number of clusters (k)
    int attempt = 3;  // repeat the clustering 3 times to avoid local minima
    Core.kmeans(samples32f, nColors, labels, criteria, attempt, Core.KMEANS_PP_CENTERS, centers);
    // centers comes back as k x 3 CV_32F in [0,1]; scale back to the 8-bit range
    centers.convertTo(centers, CvType.CV_8UC1, 255.0);
    // the rest of the code treats the image as RGB, not Lab
    List<Mat> clusters = new ArrayList<Mat>();
    for (int i = 0; i < centers.rows(); i++) {
        clusters.add(Mat.zeros(imgMat.size(), CvType.CV_8UC3));
    }
    Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
    for (int i = 0; i < centers.rows(); i++) counts.put(i, 0);
    // iterate over the image pixels, using a running index into the flattened labels
    int rows = 0;
    for (int y = 0; y < imgMat.rows(); y++) {
        for (int x = 0; x < imgMat.cols(); x++) {
            int label = (int) labels.get(rows, 0)[0];
            int r = (int) centers.get(label, 2)[0];
            int g = (int) centers.get(label, 1)[0];
            int b = (int) centers.get(label, 0)[0];
            counts.put(label, counts.get(label) + 1);
            clusters.get(label).put(y, x, b, g, r);
            rows++;
        }
    }
    return clusters;
}
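To get one segmented image rather than one Mat per cluster, a simple option (my sketch, building on the clusters list returned above) is to add the per-cluster images together, since each pixel is non-zero in exactly one of them:

// Hypothetical helper: merge the per-cluster images into a single segmented image.
static Mat combineClusters(List<Mat> clusters) {
    Mat segmented = Mat.zeros(clusters.get(0).size(), clusters.get(0).type());
    for (Mat cluster : clusters) {
        Core.add(segmented, cluster, segmented); // each pixel is non-zero in one cluster only
    }
    return segmented;
}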
I am developing an Android application to count the number of people in a real-time video. I used OpenCV to detect people, but I have not found a way to count them. If anyone knows how to do this, please help me.
Here is the code that detects people in the incoming video frame:
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    Mat gray = new Mat();
    Mat originalFrame = inputFrame.rgba();
    // Rotate and mirror the frame to match the display orientation.
    Core.transpose(originalFrame, mRgbaT);
    Imgproc.resize(mRgbaT, mRgbaF, mRgbaF.size(), 0, 0, 0);
    Core.flip(mRgbaF, originalFrame, 1);
    Imgproc.medianBlur(originalFrame, originalFrame, 3);
    // The camera frame is RGBA, so convert with the RGBA constant.
    Imgproc.cvtColor(originalFrame, gray, Imgproc.COLOR_RGBA2GRAY);
    HOGDescriptor hog = new HOGDescriptor();
    // Get the standard people detector and set it on our descriptor.
    MatOfFloat descriptors = HOGDescriptor.getDefaultPeopleDetector();
    hog.setSVMDetector(descriptors);
    MatOfRect locations = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();
    hog.detectMultiScale(gray, locations, weights);
    Point rectPoint1 = new Point();
    Point rectPoint2 = new Point();
    Point fontPoint = new Point();
    if (locations.rows() > 0) {
        List<Rect> rectangles = locations.toList();
        for (Rect rect : rectangles) {
            rectPoint1.x = rect.x;
            rectPoint1.y = rect.y;
            fontPoint.x = rect.x;
            fontPoint.y = rect.y - 4;
            rectPoint2.x = rect.x + rect.width;
            rectPoint2.y = rect.y + rect.height;
            final Scalar rectColor = new Scalar(0, 0, 0);
            // Draw the detection on the frame.
            Imgproc.rectangle(originalFrame, rectPoint1, rectPoint2, rectColor, 2);
        }
    }
    gray.release();
    return originalFrame;
}
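As for the actual counting (a hedged suggestion, not part of the original code): each rectangle in locations is one detection, so the per-frame count is simply the number of detections. Placed just before return originalFrame;, something like this would draw the count on the frame:

// One rectangle per detection, so the size of the list is the people count.
int peopleCount = locations.toList().size();
Imgproc.putText(originalFrame, "People: " + peopleCount, new Point(10, 30),
        Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 0, 0, 255), 2);

Note that HOG often fires several overlapping detections per person, so merging overlapping rectangles before counting would give a more reliable number.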
In the following code, I have carried out the following steps:
Loaded an image from sdcard.
Converted it to HSV format.
Used inRange function to mask out the red color.
Used findContours to find the contours.
Found the largest contour among those contours.
Created an ROI around the largest contour using boundingRect and submat functions.
Converted this ROI Mat to HSV format.
Iterated through the ROI Mat, and checked for each pixel whether it lies within the largest contour. I used the method pointPolygonTest to find this out, but it returns -1 for every pixel, as can be seen from the Log.i output I have pasted here. The question is why? How can I correct this?
private Scalar detectColoredBlob() {
    rgbaFrame = Highgui.imread("/mnt/sdcard/DCIM/rgbaMat4Mask.bmp");
    Mat hsvImage = new Mat();
    Imgproc.cvtColor(rgbaFrame, hsvImage, Imgproc.COLOR_BGR2HSV);
    Highgui.imwrite("/mnt/sdcard/DCIM/hsvImage.bmp", hsvImage); // check
    Mat maskedImage = new Mat();
    Core.inRange(hsvImage, new Scalar(0, 100, 100), new Scalar(10, 255, 255), maskedImage);
    Highgui.imwrite("/mnt/sdcard/DCIM/maskedImage.bmp", maskedImage); // check
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(maskedImage, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // \/ We will use only the largest contour. Other contours (any other possible blobs of this color range) will be ignored.
    MatOfPoint largestContour = contours.get(0);
    double largestContourArea = Imgproc.contourArea(largestContour);
    for (int i = 1; i < contours.size(); ++i) { // NB Notice the prefix increment.
        MatOfPoint currentContour = contours.get(i);
        double currentContourArea = Imgproc.contourArea(currentContour);
        if (currentContourArea > largestContourArea) {
            largestContourArea = currentContourArea;
            largestContour = currentContour;
        }
    }
    MatOfPoint2f largestContour2f = new MatOfPoint2f(largestContour.toArray()); // See http://stackoverflow.com/questions/11273588/how-to-convert-matofpoint-to-matofpoint2f-in-opencv-java-api
    Rect detectedBlobRoi = Imgproc.boundingRect(largestContour);
    Mat detectedBlobRgba = rgbaFrame.submat(detectedBlobRoi);
    Highgui.imwrite("/mnt/sdcard/DCIM/detectedBlobRgba.bmp", detectedBlobRgba); // check
    Mat detectedBlobHsv = new Mat();
    Imgproc.cvtColor(detectedBlobRgba, detectedBlobHsv, Imgproc.COLOR_BGR2HSV);
    Highgui.imwrite("/mnt/sdcard/DCIM/roiHsv.bmp", detectedBlobHsv); // check
    for (int firstCoordinate = 0; firstCoordinate < detectedBlobHsv.rows(); firstCoordinate++) {
        for (int secondCoordinate = 0; secondCoordinate < detectedBlobHsv.cols(); secondCoordinate++) {
            Log.i(TAG, "HAPPY " + Arrays.toString(detectedBlobHsv.get(firstCoordinate, secondCoordinate)));
            if (Imgproc.pointPolygonTest(largestContour2f, new Point(firstCoordinate, secondCoordinate), false) == -1) {
                Log.i(TAG, "HAPPY ....................... OUTSIDE");
            }
        }
    }
    Highgui.imwrite("/mnt/sdcard/DCIM/processedcontoured.bmp", detectedBlobHsv); // check
EDIT:
I am doing this because I need to compute the average HSV color of the pixels lying inside the contour (i.e. the average HSV color of the biggest red colored blob). If I computed the average color of the ROI detectedBlobHsv by the normal formula, I would do something like:
Scalar sumHsvOfPixels = Core.sumElems(detectedBlobHsv);
Scalar averageHsvColor = new Scalar(0, 0, 0);
int numOfPixels = detectedBlobHsv.width() * detectedBlobHsv.height();
for (int channel = 0; channel < sumHsvOfPixels.val.length; channel++) {
    averageHsvColor.val[channel] = sumHsvOfPixels.val[channel] / numOfPixels;
}
So somebody here on SO (probably you?) had suggested a way to exclude pixels outside my contour a while back. I'd implement that like:
// Give pixels outside the contour of interest an HSV value of double[]{0,0,0}, so they
// don't affect the computation of sumHsvOfPixels while computing the average, and keep
// track of the number of pixels zeroed out this way, so we can subtract that number
// from numOfPixels during the computation of the average.
int pixelsRemoved = 0;
for (int row = 0; row < detectedBlobHsv.rows(); row++) {
    for (int col = 0; col < detectedBlobHsv.cols(); col++) {
        if (Imgproc.pointPolygonTest(largestContour2f, new Point(row, col), false) == -1) {
            detectedBlobHsv.put(row, col, new double[]{0, 0, 0});
            pixelsRemoved++;
        }
    }
}
Then compute the average like:
Scalar sumHsvOfPixels = Core.sumElems(detectedBlobHsv); // this will now exclude pixels outside the contour
Scalar averageHsvColor = new Scalar(0, 0, 0);
int numOfPixels = (detectedBlobHsv.width() * detectedBlobHsv.height()) - pixelsRemoved;
for (int channel = 0; channel < sumHsvOfPixels.val.length; channel++) {
    averageHsvColor.val[channel] = sumHsvOfPixels.val[channel] / numOfPixels;
}
EDIT 1:
Towards the end of the following method, I have created the mask from a list of MatOfPoints which contains only the largest contour. When I wrote it to the SD card, I got:
I don't know where I messed up!
private Scalar detectColoredBlob() {
    //Highgui.imwrite("/mnt/sdcard/DCIM/rgbaFrame.jpg", rgbaFrame); // check
    rgbaFrame = Highgui.imread("/mnt/sdcard/DCIM/rgbaMat4Mask.bmp");
    // GIVING A UNIFORM VALUE OF 255 TO THE V CHANNEL OF EACH PIXEL (255 IS THE
    // MAXIMUM VALUE OF V ALLOWED - simulating a maximum-light condition)
    for (int firstCoordinate = 0; firstCoordinate < rgbaFrame.rows(); firstCoordinate++) {
        for (int secondCoordinate = 0; secondCoordinate < rgbaFrame.cols(); secondCoordinate++) {
            double[] pixelChannels = rgbaFrame.get(firstCoordinate, secondCoordinate);
            pixelChannels[2] = 255;
            rgbaFrame.put(firstCoordinate, secondCoordinate, pixelChannels);
        }
    }
    Mat hsvImage = new Mat();
    Imgproc.cvtColor(rgbaFrame, hsvImage, Imgproc.COLOR_BGR2HSV);
    Highgui.imwrite("/mnt/sdcard/DCIM/hsvImage.bmp", hsvImage); // check
    Mat maskedImage = new Mat();
    Core.inRange(hsvImage, new Scalar(0, 100, 100), new Scalar(10, 255, 255), maskedImage);
    Highgui.imwrite("/mnt/sdcard/DCIM/maskedImage.bmp", maskedImage); // check
    // Mat dilatedMat = new Mat();
    // Imgproc.dilate(maskedImage, dilatedMat, new Mat());
    // Highgui.imwrite("/mnt/sdcard/DCIM/dilatedMat.jpg", dilatedMat); // check
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(maskedImage, contours, new Mat(), Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
    // FINDING THE BIGGEST CONTOUR
    // \/ We will use only the largest contour. Other contours (any other possible blobs of this color range) will be ignored.
    MatOfPoint largestContour = contours.get(0);
    double largestContourArea = Imgproc.contourArea(largestContour);
    for (int i = 1; i < contours.size(); ++i) { // NB Notice the prefix increment.
        MatOfPoint currentContour = contours.get(i);
        double currentContourArea = Imgproc.contourArea(currentContour);
        if (currentContourArea > largestContourArea) {
            largestContourArea = currentContourArea;
            largestContour = currentContour;
        }
    }
    Rect detectedBlobRoi = Imgproc.boundingRect(largestContour);
    Mat detectedBlobRgba = rgbaFrame.submat(detectedBlobRoi);
    Highgui.imwrite("/mnt/sdcard/DCIM/detectedBlobRgba.bmp", detectedBlobRgba); // check
    Mat detectedBlobHsv = new Mat();
    Imgproc.cvtColor(detectedBlobRgba, detectedBlobHsv, Imgproc.COLOR_BGR2HSV);
    Highgui.imwrite("/mnt/sdcard/DCIM/roiHsv.bmp", detectedBlobHsv); // check
    List<MatOfPoint> largestContourList = new ArrayList<>();
    largestContourList.add(largestContour);
    Mat roiWithMask = new Mat(detectedBlobHsv.rows(), detectedBlobHsv.cols(), CvType.CV_8UC3);
    roiWithMask.setTo(new Scalar(0, 0, 0));
    Imgproc.drawContours(roiWithMask, largestContourList, 0, new Scalar(0, 255, 255), -1); // TODO Using -1 instead of CV_FILLED.
    Highgui.imwrite("/mnt/sdcard/DCIM/roiWithMask.bmp", roiWithMask); // check
    // CALCULATING THE AVERAGE COLOR OF THE DETECTED BLOB
    // STEP 1:
    double[] averageHsvColor = new double[]{0, 0, 0};
    int numOfPixels = 0;
    for (int firstCoordinate = 0; firstCoordinate < detectedBlobHsv.rows(); ++firstCoordinate) {
        for (int secondCoordinate = 0; secondCoordinate < detectedBlobHsv.cols(); ++secondCoordinate) {
            double hue = roiWithMask.get(firstCoordinate, secondCoordinate)[0];
            double saturation = roiWithMask.get(firstCoordinate, secondCoordinate)[1];
            double value = roiWithMask.get(firstCoordinate, secondCoordinate)[2];
            averageHsvColor[0] += hue;
            averageHsvColor[1] += saturation;
            averageHsvColor[2] += value;
            numOfPixels++;
        }
    }
    averageHsvColor[0] /= numOfPixels;
    averageHsvColor[1] /= numOfPixels;
    averageHsvColor[2] /= numOfPixels;
    return new Scalar(averageHsvColor);
}
EDIT 2:
I corrected my 3-channel mask and made a single-channel mask:
Mat roiMask = new Mat(rgbaFrame.rows(), rgbaFrame.cols(), CvType.CV_8UC1);
roiMask.setTo(new Scalar(0));
Imgproc.drawContours(roiMask, largestContourList, 0, new Scalar(255), -1);
and this resulted in the correct roiMask:
Then, before the comment // CALCULATING THE AVERAGE COLOR OF THE DETECTED BLOB, I added:
Mat newImageWithRoi = new Mat(rgbaFrame.rows(), rgbaFrame.cols(), CvType.CV_8UC3);
newImageWithRoi.setTo(new Scalar(0, 0, 0));
rgbaFrame.copyTo(newImageWithRoi, roiMask);
Highgui.imwrite("/mnt/sdcard/DCIM/newImageWithRoi.bmp", newImageWithRoi);//check
This resulted in:
Now again I don't know how to proceed. :s
You don't need to use pointPolygonTest, because you already have the mask.
You can simply sum up the values that lie on the mask. Something along these lines (I'm not able to test this):
// Initialize at 0!!!
Scalar averageHsvColor = new Scalar(0, 0, 0);
int numOfPixels = 0;
// Assumes maskRoi is the single-channel mask cropped to the same region as
// detectedBlobHsv, e.g. Mat maskRoi = roiMask.submat(detectedBlobRoi);
for (int r = 0; r < detectedBlobHsv.rows(); ++r) {
    for (int c = 0; c < detectedBlobHsv.cols(); ++c) {
        if (maskRoi.get(r, c)[0] > 0) {
            // Sum the H, S and V values of the pixel at (r, c)
            double[] hsv = detectedBlobHsv.get(r, c);
            averageHsvColor.val[0] += hsv[0];
            averageHsvColor.val[1] += hsv[1];
            averageHsvColor.val[2] += hsv[2];
            // Increment the number of pixels inside the mask
            numOfPixels++;
        }
    }
}
// Compute the average
averageHsvColor.val[0] /= numOfPixels;
averageHsvColor.val[1] /= numOfPixels;
averageHsvColor.val[2] /= numOfPixels;
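As a side note (my addition, not part of the original answer): OpenCV's Core.mean accepts a mask, so once you have the mask cropped to the ROI the whole loop collapses into a single call:

// Per-channel mean of detectedBlobHsv over the pixels where maskRoi is non-zero.
Scalar averageHsvColor = Core.mean(detectedBlobHsv, maskRoi);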