I am making an Android app that applies a perspective transform to an image.
I want to do the same thing as the code below.
I tried, but I can't read C code. Please advise me!
Thanks,
Shoichi
Sample code (what I want to do):
#include <cv.h>
#include <highgui.h>

int
main (int argc, char **argv)
{
  IplImage *src_img = 0, *dst_img = 0;
  CvMat *map_matrix;
  CvPoint2D32f src_pnt[4], dst_pnt[4];

  if (argc >= 2)
    src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
  if (src_img == 0)
    return -1;
  dst_img = cvCloneImage (src_img);

  src_pnt[0] = cvPoint2D32f (150.0, 150.0);
  src_pnt[1] = cvPoint2D32f (150.0, 300.0);
  src_pnt[2] = cvPoint2D32f (350.0, 300.0);
  src_pnt[3] = cvPoint2D32f (350.0, 150.0);
  dst_pnt[0] = cvPoint2D32f (200.0, 200.0);
  dst_pnt[1] = cvPoint2D32f (150.0, 300.0);
  dst_pnt[2] = cvPoint2D32f (350.0, 300.0);
  dst_pnt[3] = cvPoint2D32f (300.0, 200.0);

  map_matrix = cvCreateMat (3, 3, CV_32FC1);
  cvGetPerspectiveTransform (src_pnt, dst_pnt, map_matrix);
  cvWarpPerspective (src_img, dst_img, map_matrix, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll (100));

  cvNamedWindow ("src", CV_WINDOW_AUTOSIZE);
  cvNamedWindow ("dst", CV_WINDOW_AUTOSIZE);
  cvShowImage ("src", src_img);
  cvShowImage ("dst", dst_img);
  cvWaitKey (0);

  cvDestroyWindow ("src");
  cvDestroyWindow ("dst");
  cvReleaseImage (&src_img);
  cvReleaseImage (&dst_img);
  cvReleaseMat (&map_matrix);

  return 1;
}
My code:
@Override
public void onCreate(Bundle savedInstanceState) {
    Log.i(TAG, "onCreate");
    super.onCreate(savedInstanceState);
    requestWindowFeature(Window.FEATURE_NO_TITLE);
    setContentView(R.layout.main);
    Resources r = getResources();
    Bitmap inputBitmap = BitmapFactory.decodeResource(r, R.drawable.icon);
    Mat inputMat = Utils.bitmapToMat(inputBitmap);
    Mat outputMat = inputMat.clone();
    List<Point> src_pnt = new ArrayList<Point>();
    Point p0 = new Point(75.0, 75.0);
    src_pnt.add(p0);
    Point p1 = new Point(75.0, 100.0);
    src_pnt.add(p1);
    Point p2 = new Point(100.0, 100.0);
    src_pnt.add(p2);
    Point p3 = new Point(100.0, 75.0);
    src_pnt.add(p3);
    Mat startM = Converters.vector_Point2f_to_Mat(src_pnt);
    List<Point> dst_pnt = new ArrayList<Point>();
    Point p4 = new Point(75.0, 75.0);
    dst_pnt.add(p4);
    Point p5 = new Point(75.0, 100.0);
    dst_pnt.add(p5);
    Point p6 = new Point(100.0, 100.0);
    dst_pnt.add(p6);
    Point p7 = new Point(100.0, 75.0);
    dst_pnt.add(p7);
    Mat endM = Converters.vector_Point2f_to_Mat(dst_pnt);
    Mat M = new Mat(3, 3, CvType.CV_32F);
    Core.perspectiveTransform(startM, endM, M);
    Size size = new Size(200.0, 200.0);
    Scalar scalar = new Scalar(50.0);
    Imgproc.warpPerspective(inputMat, outputMat, M, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS, Imgproc.BORDER_DEFAULT, scalar);
    Bitmap outputBitmap = inputBitmap;
    Utils.matToBitmap(outputMat, outputBitmap);
    ImageView imageView1 = (ImageView) findViewById(R.id.imageView1);
    imageView1.setImageBitmap(outputBitmap);
}
Your dst_pnt points are the same as src_pnt, so the transformation matrix will be the identity and won't change the image at all. Use some other points.
Secondly, the fourth argument to warpPerspective (size) should be the size of the output image, so if you want a 200x200 image, instead of
Mat outputMat = inputMat.clone();
use
Mat outputMat = new Mat(200, 200, inputMat.type());
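Putting both fixes together, here is a minimal corrected sketch (my own, assuming the OpenCV4Android Java bindings and the inputMat from the question; note that Core.perspectiveTransform applies an existing matrix to a set of points, while Imgproc.getPerspectiveTransform is the call that computes the 3x3 matrix from four point pairs):

// Assumes java.util.Arrays and the usual org.opencv.* imports.
List<Point> src_pnt = Arrays.asList(
        new Point(75.0, 75.0), new Point(75.0, 100.0),
        new Point(100.0, 100.0), new Point(100.0, 75.0));
// The destination points must differ from the source points,
// otherwise the matrix is the identity.
List<Point> dst_pnt = Arrays.asList(
        new Point(80.0, 80.0), new Point(75.0, 100.0),
        new Point(100.0, 100.0), new Point(95.0, 80.0));
Mat startM = Converters.vector_Point2f_to_Mat(src_pnt);
Mat endM = Converters.vector_Point2f_to_Mat(dst_pnt);
// Compute the 3x3 perspective matrix from the point correspondences.
Mat M = Imgproc.getPerspectiveTransform(startM, endM);
// The size argument is the size of the output image.
Size size = new Size(200, 200);
Mat outputMat = new Mat(size, inputMat.type());
Imgproc.warpPerspective(inputMat, outputMat, M, size, Imgproc.INTER_LINEAR);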
Related
I'm new to OpenCV on Android and am trying to do a perspective transform, but I don't know how to use the getPerspectiveTransform() and warpPerspective() functions. I can detect a rectangle in an image, but I don't know how to warp it.
Here is the rectangle-detection code:
Mat tempMat = new Mat();
Mat src = new Mat();
Utils.bitmapToMat(image, tempMat);
Imgproc.cvtColor(tempMat, src, Imgproc.COLOR_BGR2RGB);
Mat blurred = src.clone();
Imgproc.medianBlur(src, blurred, 9);
Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
List<Mat> blurredChannel = new ArrayList<Mat>();
blurredChannel.add(blurred);
List<Mat> gray0Channel = new ArrayList<Mat>();
gray0Channel.add(gray0);
MatOfPoint2f approxCurve = new MatOfPoint2f();
double maxArea = 0;
int maxId = -1;
for (int c = 0; c < 3; c++) {
    int ch[] = { c, 0 };
    Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
    int thresholdLevel = 1;
    for (int t = 0; t < thresholdLevel; t++) {
        if (t == 0) {
            Imgproc.Canny(gray0, gray, 50, 50, 3, true); // true ?
            Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1 ?
        } else {
            Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
                    Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
                    Imgproc.THRESH_BINARY,
                    (src.width() + src.height()) / 200, t);
        }
        Imgproc.findContours(gray, contours, new Mat(),
                Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
        for (MatOfPoint contour : contours) {
            MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
            double area = Imgproc.contourArea(contour);
            approxCurve = new MatOfPoint2f();
            Imgproc.approxPolyDP(temp, approxCurve,
                    Imgproc.arcLength(temp, true) * 0.02, true);
            if (approxCurve.total() == 4 && area >= maxArea) {
                double maxCosine = 0;
                List<Point> curves = approxCurve.toList();
                for (int j = 2; j < 5; j++) {
                    double cosine = Math.abs(angle(curves.get(j % 4),
                            curves.get(j - 2), curves.get(j - 1)));
                    maxCosine = Math.max(maxCosine, cosine);
                }
                if (maxCosine < 0.45) {
                    maxArea = area;
                    maxId = contours.indexOf(contour);
                }
            }
        }
    }
}
I draw the rectangle with this statement:
if (maxId >= 0) {
    Rect rect = Imgproc.boundingRect(contours.get(maxId));
    Imgproc.rectangle(src, rect.tl(), rect.br(), new Scalar(255, 0, 0, .8), 4);
}
After that I convert the Mat to a Bitmap and show it in an ImageView.
Here is the screenshot:
So my problem is warping: how can I warp the rectangle and rotate it?
And, if it is possible, how can I improve the rectangle detection? Any hints?
(OpenCV Android SDK version: 3.4.1, Android Studio version: 3.0.1)
If you are looking to warp the detected contour into a rectangle:
Get the contour of the rectangle.
Find the convex hull of the contour.
Using approxPolyDP, reduce the convex hull points to 4 points.
Fit lines to consecutive point pairs (e.g., if pts is the array, the lines are fit as l1 = lineBetween(pts[0], pts[1]), l2 = lineBetween(pts[1], pts[2]), l3 = lineBetween(pts[2], pts[3]), l4 = lineBetween(pts[3], pts[0])).
Find the intersections between these lines; you'll end up with four points.
Order the points clockwise (inputCorners = TopLeft, TopRight, BottomRight, BottomLeft).
Create an output image with the needed resolution and set its corner points in the same clockwise order ((0, 0), (cols, 0), (cols, rows), (0, rows)).
Find the homography using the function
Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
Using the resulting homography matrix, warp the input image using the function
Imgproc.warpPerspective(image, outputMat, homography, new Size(image.cols(), image.rows()));
You can refer to the following link. A condensed sketch of these last steps follows below.
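Here is a condensed Java sketch of the corner-ordering, homography and warp steps (my own illustration: sortClockwise is a hypothetical helper, corners holds the four line intersections, image is the input Mat as in the snippet above, and the output resolution is only illustrative):

// (assumes java.util.Arrays and the usual org.opencv.* imports)
// corners: the four intersection points of the fitted lines (List<Point>).
// sortClockwise (hypothetical) returns them ordered TL, TR, BR, BL.
List<Point> inputCorners = sortClockwise(corners);
int cols = 600, rows = 400; // illustrative output resolution
List<Point> imageCorners = Arrays.asList(
        new Point(0, 0), new Point(cols, 0),
        new Point(cols, rows), new Point(0, rows));
MatOfPoint2f srcCorners = new MatOfPoint2f(inputCorners.toArray(new Point[0]));
MatOfPoint2f dstCorners = new MatOfPoint2f(imageCorners.toArray(new Point[0]));
Mat homography = Calib3d.findHomography(srcCorners, dstCorners, Calib3d.RANSAC, 10);
Mat outputMat = new Mat();
Imgproc.warpPerspective(image, outputMat, homography, new Size(cols, rows));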
This is my Kotlin extension version; you can use it in your projects.
fun Bitmap.perspectiveTransform(srcPoints: List<org.opencv.core.Point>): Bitmap {
    val dstWidth = max(
        srcPoints[0].distanceFrom(srcPoints[1]),
        srcPoints[2].distanceFrom(srcPoints[3])
    )
    val dstHeight = max(
        srcPoints[0].distanceFrom(srcPoints[2]),
        srcPoints[1].distanceFrom(srcPoints[3])
    )
    val dstPoints: List<org.opencv.core.Point> = listOf(
        org.opencv.core.Point(0.0, 0.0),
        org.opencv.core.Point(dstWidth, 0.0),
        org.opencv.core.Point(0.0, dstHeight),
        org.opencv.core.Point(dstWidth, dstHeight)
    )
    return try {
        val srcMat = Converters.vector_Point2d_to_Mat(srcPoints)
        val dstMat = Converters.vector_Point2d_to_Mat(dstPoints)
        val perspectiveTransformation = Imgproc.getPerspectiveTransform(srcMat, dstMat)
        val inputMat = Mat(this.height, this.width, CvType.CV_8UC1)
        Utils.bitmapToMat(this, inputMat)
        val outPutMat = Mat(dstHeight.toInt(), dstWidth.toInt(), CvType.CV_8UC1)
        Imgproc.warpPerspective(
            inputMat,
            outPutMat,
            perspectiveTransformation,
            Size(dstWidth, dstHeight)
        )
        val outPut = Bitmap.createBitmap(
            dstWidth.toInt(),
            dstHeight.toInt(),
            Bitmap.Config.RGB_565
        )
        // Imgproc.cvtColor(outPutMat, outPutMat, Imgproc.COLOR_GRAY2BGR)
        Utils.matToBitmap(outPutMat, outPut)
        outPut
    } catch (e: Exception) {
        e.printStackTrace()
        this
    }
}
To use distanceFrom, I wrote another extension function:
fun org.opencv.core.Point.distanceFrom(srcPoint: org.opencv.core.Point): Double {
    val w1 = this.x - srcPoint.x
    val h1 = this.y - srcPoint.y
    val distance = w1.pow(2) + h1.pow(2)
    return sqrt(distance)
}
Also, in this answer the correct srcPoints indices are:
0: topLeft
1: topRight
2: bottomLeft
3: bottomRight
Good luck.
Input image:
Result image:
I have been able to filter the largest contour in the image to detect the token.
I have applied a perspective warp, but it only crops the image at the edges of the contour, nothing else.
I want the detected token cropped out of the rest of the image entirely and de-skewed while keeping proportions, so the resulting image should be upright and straight. Then I will move forward with finding the blobs in the token to detect the dates marked inside it.
private Mat processMat(Mat srcMat) {
    Mat processedMat = new Mat();
    Imgproc.cvtColor(srcMat, processedMat, Imgproc.COLOR_BGR2GRAY);
    Imgproc.GaussianBlur(processedMat, processedMat, new Size(5, 5), 5);
    Imgproc.threshold(processedMat, processedMat, 127, 255, Imgproc.THRESH_BINARY);
    List<MatOfPoint> contours = new ArrayList<>();
    Mat hierarchy = new Mat();
    Imgproc.findContours(processedMat, contours, hierarchy, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    double maxVal = 0;
    int maxValIdx = 0;
    for (int contourIdx = 0; contourIdx < contours.size(); contourIdx++) {
        double contourArea = Imgproc.contourArea(contours.get(contourIdx));
        if (maxVal < contourArea) {
            maxVal = contourArea;
            maxValIdx = contourIdx;
        }
    }
    if (!contours.isEmpty()) {
        Imgproc.drawContours(srcMat, contours, maxValIdx, new Scalar(0, 255, 0), 3);
        Rect rect = Imgproc.boundingRect(contours.get(maxValIdx));
        Log.e("rect", "" + rect);
        int top = srcMat.height();
        int left = srcMat.width();
        int right = 0;
        int bottom = 0;
        if (rect.x < left) {
            left = rect.x;
        }
        if (rect.x + rect.width > right) {
            right = rect.x + rect.width;
        }
        if (rect.y < top) {
            top = rect.y;
        }
        if (rect.y + rect.height > bottom) {
            bottom = rect.y + rect.height;
        }
        Point topLeft = new Point(left, top);
        Point topRight = new Point(right, top);
        Point bottomRight = new Point(right, bottom);
        Point bottomLeft = new Point(left, bottom);
        return warp(srcMat, topLeft, topRight, bottomLeft, bottomRight);
    }
    return null;
}
Mat warp(Mat inputMat, Point topLeft, Point topRight, Point bottomLeft, Point bottomRight) {
    int resultWidth = (int) (topRight.x - topLeft.x);
    int bottomWidth = (int) (bottomRight.x - bottomLeft.x);
    if (bottomWidth > resultWidth)
        resultWidth = bottomWidth;
    int resultHeight = (int) (bottomLeft.y - topLeft.y);
    int bottomHeight = (int) (bottomRight.y - topRight.y);
    if (bottomHeight > resultHeight) {
        resultHeight = bottomHeight;
    }
    Mat outputMat = new Mat(resultWidth, resultHeight, CvType.CV_8UC1);
    List<Point> source = new ArrayList<>();
    source.add(topLeft);
    source.add(topRight);
    source.add(bottomLeft);
    source.add(bottomRight);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    Point ocvPOut1 = new Point(0, 0);
    Point ocvPOut2 = new Point(resultWidth, 0);
    Point ocvPOut3 = new Point(0, resultHeight);
    Point ocvPOut4 = new Point(resultWidth, resultHeight);
    List<Point> dest = new ArrayList<>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight));
    return outputMat;
}
UPDATE 1
Replaced This:
return warp(srcMat, topLeft, topRight, bottomLeft, bottomRight);
With This:
return warp(srcMat, topLeft, topRight, bottomRight, bottomLeft);
Result Update 1:
UPDATE 2
public Mat warp(Mat inputMat, MatOfPoint selectedContour) {
    MatOfPoint2f new_mat = new MatOfPoint2f(selectedContour.toArray());
    MatOfPoint2f approxCurve_temp = new MatOfPoint2f();
    int contourSize = (int) selectedContour.total();
    Imgproc.approxPolyDP(new_mat, approxCurve_temp, contourSize * 0.05, true);
    double[] temp_double;
    temp_double = approxCurve_temp.get(0, 0);
    Point p1 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(1, 0);
    Point p2 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(2, 0);
    Point p3 = new Point(temp_double[0], temp_double[1]);
    temp_double = approxCurve_temp.get(3, 0);
    Point p4 = new Point(temp_double[0], temp_double[1]);
    List<Point> source = new ArrayList<Point>();
    source.add(p1);
    source.add(p2);
    source.add(p3);
    source.add(p4);
    Mat startM = Converters.vector_Point2f_to_Mat(source);
    int resultWidth = 846;
    int resultHeight = 2048;
    Mat outputMat = new Mat(resultWidth, resultHeight, CvType.CV_8UC4);
    Point ocvPOut1 = new Point(0, 0);
    Point ocvPOut2 = new Point(0, resultHeight);
    Point ocvPOut3 = new Point(resultWidth, resultHeight);
    Point ocvPOut4 = new Point(resultWidth, 0);
    List<Point> dest = new ArrayList<Point>();
    dest.add(ocvPOut1);
    dest.add(ocvPOut2);
    dest.add(ocvPOut3);
    dest.add(ocvPOut4);
    Mat endM = Converters.vector_Point2f_to_Mat(dest);
    Mat perspectiveTransform = Imgproc.getPerspectiveTransform(startM, endM);
    Imgproc.warpPerspective(inputMat, outputMat, perspectiveTransform, new Size(resultWidth, resultHeight),
            Imgproc.INTER_CUBIC);
    return outputMat;
}
Result Update 2:
I have changed my warp function a bit and the code is attached.
However, the resulting image is somehow rotated in the wrong direction. Can you guide me on the correct way to do this?
The Android device orientation is set to portrait, and the input image is in portrait as well.
UPDATE 3
I have managed to straighten the token by sorting the corners like so:
List<Point> source = new ArrayList<Point>();
source.add(p2);
source.add(p3);
source.add(p4);
source.add(p1);
Mat startM = Converters.vector_Point2f_to_Mat(source);
Result Update 3:
However, the resulting image is cropped on the left side, which I have no idea how to tackle.
I have managed to straighten the input image when the token is tilted to the right or left, and the output image is straight nonetheless. However, if the input image already has the token centred and straight up, it rotates the token like so, using the same code:
Issue Update 3:
The transformation that deskews the ticket is close to an affine one. You can obtain it by approximating the outline with a parallelogram: find the vertices of the parallelogram as the leftmost, topmost, rightmost and bottommost points of the contour.
Actually, you just need three vertices (the fourth can be recomputed from them). Maybe a least-squares fitting of the parallelogram is possible, I don't know.
Another option is to consider a homographic transform, which is defined from four points (but the computation is much more complex). It will take perspective into account. (You might get some insight here: https://www.codeproject.com/Articles/674433/Perspective-Projection-of-a-Rectangle-Homography.)
To straighten up the image, it suffices to apply the inverse transform and retrieve a rectangle. Note, though, that the size of this rectangle is unknown, so you can scale it arbitrarily. The hardest issue is finding a suitable aspect ratio.
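A rough Java sketch of the parallelogram idea (my own illustration; contour is assumed to be the selected MatOfPoint):

// Scan the contour for its four extreme points.
Point[] pts = contour.toArray();
Point leftmost = pts[0], rightmost = pts[0], topmost = pts[0], bottommost = pts[0];
for (Point p : pts) {
    if (p.x < leftmost.x) leftmost = p;
    if (p.x > rightmost.x) rightmost = p;
    if (p.y < topmost.y) topmost = p;
    if (p.y > bottommost.y) bottommost = p;
}
// Use these four extreme points as the source corners for
// getPerspectiveTransform/warpPerspective, or keep only three of them and use
// Imgproc.getAffineTransform/Imgproc.warpAffine for the purely affine case.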
I want to implement k-means clustering to segment an image based on color intensity, and actually I do not know how to get the segmented image and ROI after applying the Core.kmeans function. I followed the steps in the question here, but there is no answer on how to proceed from that point. Any help would be appreciated.
Thanks in advance.
This is the code I am using, based on different sources, and actually it is not working:
// convert to Lab with three channels
Mat imgLab = new Mat();
Mat imgMat = new Mat(); // assumed to be filled with the input RGB image elsewhere
Imgproc.cvtColor(imgMat, imgLab, Imgproc.COLOR_RGB2Lab, 3);
// separate the channels of the Lab image
List<Mat> lab_planes = new ArrayList<Mat>(3);
Core.split(imgLab, lab_planes); // split imgLab here, not the RGB input
Mat channel = lab_planes.get(2);
channel = Mat.zeros(imgLab.rows(), imgLab.cols(), CvType.CV_8UC1);
// use only AB channels in Lab color space
lab_planes.set(2, channel);
Core.merge(lab_planes,imgLab);
Mat samples = imgLab.reshape(1, imgLab.cols() * imgLab.rows());
Mat samples32f = new Mat();
samples.convertTo(samples32f, CvType.CV_32F, 1.0 / 255.0);
Mat labels = new Mat();
TermCriteria criteria = new TermCriteria(TermCriteria.EPS + TermCriteria.MAX_ITER, 100, 1);
Mat centers = new Mat();
//Mat clusteredLab = new Mat();
int nColors = 3; //number of clusters (k)
int attempt = 3; //number of attempts
// repeat the clustering 3 times to avoid local minima
Core.kmeans(samples32f, nColors, labels, criteria, attempt, Core.KMEANS_PP_CENTERS, centers);
centers.convertTo(centers, CvType.CV_8UC1);
centers.reshape(3);
// the rest of the code is for an RGB image, not Lab
List<Mat> clusters = new ArrayList<Mat>();
for (int i = 0; i < centers.rows(); i++) {
    clusters.add(Mat.zeros(samples.size(), samples.type()));
}
Map<Integer, Integer> counts = new HashMap<Integer, Integer>();
for (int i = 0; i < centers.rows(); i++) counts.put(i, 0);
int rows = 0;
for (int y = 0; y < samples.rows(); y++) {
    for (int x = 0; x < samples.cols(); x++) {
        int label = (int) labels.get(rows, 0)[0];
        int r = (int) centers.get(label, 2)[0];
        int g = (int) centers.get(label, 1)[0];
        int b = (int) centers.get(label, 0)[0];
        counts.put(label, counts.get(label) + 1);
        clusters.get(label).put(y, x, b, g, r);
        rows++;
    }
}
return clusters;
}
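For what it's worth, here is a minimal sketch (my own, not from the sources above) of painting each pixel with its cluster center to obtain one segmented image. It assumes centers is kept as CV_32F, i.e., the convertTo to CV_8UC1 above is skipped, since that collapses the 1/255-scaled center values to 0 or 1:

// labels has one row per sample; samples were taken row-major from imgLab.
Mat segmented = new Mat(imgLab.rows(), imgLab.cols(), CvType.CV_8UC3);
for (int i = 0; i < labels.rows(); i++) {
    int label = (int) labels.get(i, 0)[0];
    int y = i / imgLab.cols();
    int x = i % imgLab.cols();
    // centers were computed on samples scaled by 1/255, so scale back up.
    segmented.put(y, x,
            centers.get(label, 0)[0] * 255.0,
            centers.get(label, 1)[0] * 255.0,
            centers.get(label, 2)[0] * 255.0);
}
// segmented is still in the modified Lab space; convert back for display, e.g.:
// Imgproc.cvtColor(segmented, segmented, Imgproc.COLOR_Lab2RGB);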
I am developing an Android application to count the number of people in a real-time video. I used OpenCV to detect people, but I haven't found a way to count them. If anyone knows how to do it, please help me.
Here is the code that detects people in the incoming video frames:
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
    List<MatOfPoint> list = new ArrayList<>();
    Mat frame = new Mat();
    Mat gray = new Mat();
    Mat hierarchy = new Mat();
    Mat originalFrame = inputFrame.rgba();
    Core.transpose(originalFrame, mRgbaT);
    Imgproc.resize(mRgbaT, mRgbaF, mRgbaF.size(), 0, 0, 0);
    Core.flip(mRgbaF, originalFrame, 1);
    Imgproc.medianBlur(originalFrame, originalFrame, 3);
    Imgproc.cvtColor(originalFrame, gray, Imgproc.COLOR_BGR2GRAY, 0);
    HOGDescriptor hog = new HOGDescriptor();
    // Get the standard people detector and assign it to our descriptor
    MatOfFloat descriptors = HOGDescriptor.getDefaultPeopleDetector();
    hog.setSVMDetector(descriptors);
    MatOfRect locations = new MatOfRect();
    MatOfDouble weights = new MatOfDouble();
    hog.detectMultiScale(gray, locations, weights);
    Point rectPoint1 = new Point();
    Point rectPoint2 = new Point();
    Point fontPoint = new Point();
    if (locations.rows() > 0) {
        List<Rect> rectangles = locations.toList();
        for (Rect rect : rectangles) {
            rectPoint1.x = rect.x;
            rectPoint1.y = rect.y;
            fontPoint.x = rect.x;
            fontPoint.y = rect.y - 4;
            rectPoint2.x = rect.x + rect.width;
            rectPoint2.y = rect.y + rect.height;
            final Scalar rectColor = new Scalar(0, 0, 0);
            // Draw the detection rectangle on the frame
            Imgproc.rectangle(originalFrame, rectPoint1, rectPoint2, rectColor, 2);
        }
    }
    frame.release();
    gray.release();
    hierarchy.release();
    int i = list.size();
    list.clear();
    return originalFrame;
}
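A naive per-frame count is simply the number of detections returned by detectMultiScale; note that without tracking across frames the same person is counted again on every frame, so this sketch only gives the per-frame number (TAG is assumed to be defined in the activity):

// Inside onCameraFrame, after hog.detectMultiScale(gray, locations, weights):
int peopleInFrame = locations.toList().size();
Log.i(TAG, "People detected in this frame: " + peopleInFrame);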
I am trying to write an Android app that performs histogram matching of color images using OpenCV 3.1. I found this code example in C++ and converted it to Java.
I tried to match each RGB channel separately, but it did not give me the desired results. So now I'm converting the images to YUV color space and then matching the Y channels. What I hope to achieve is brightness matching, so if the source is brighter than the target, I will get a darker image.
So far it seems that the Y histogram of the final (output) image is somewhat close to the Y histogram of the target image, but the actual output doesn't look like the target.
Here is the relevant code:
private Mat calculateLUT(Mat in_cdf_mat, Mat dst_cdf_mat) {
    int last = 0;
    double epsilon = Double.parseDouble(epsilonTextView.getText().toString()); // epsilon set to 0.01
    Mat M = new Mat(256, 1, CvType.CV_8UC1);
    for (int j = 0; j < in_cdf_mat.rows(); j++) {
        double F1j = in_cdf_mat.get(j, 0)[0];
        for (int k = last; k < dst_cdf_mat.rows(); k++) {
            double F2k = dst_cdf_mat.get(k, 0)[0];
            if (Math.abs(F2k - F1j) < epsilon || F2k > F1j) {
                double[] data = {k};
                M.put(j, 0, data);
                last = k;
                break;
            }
        }
    }
    return M;
}
private void calculateCDF(Mat channel, Mat cdf) {
    // channel holds the histogram. The indices represent the pixel color
    // and the value is the number of pixels of that color in the image.
    cdf.put(0, 0, channel.get(0, 0)); // seed the running sum with the first bin
    for (int i = 1; i < 256; i++) {
        double[] data = new double[1];
        data[0] = cdf.get(i - 1, 0)[0] + channel.get(i, 0)[0];
        cdf.put(i, 0, data);
    }
}
private void calcHistogram(String imgPath, Mat y_hist, Mat y_cdf) {
    Mat image;
    Mat ycrcb = new Mat();
    image = Imgcodecs.imread(imgPath);
    Imgproc.cvtColor(image, ycrcb, Imgproc.COLOR_RGB2YCrCb);
    image.release();
    List<Mat> ycrcbChannels = new ArrayList<>();
    Core.split(ycrcb, ycrcbChannels);
    List<Mat> yList = new ArrayList<>();
    yList.add(ycrcbChannels.get(0));
    MatOfInt histSize = new MatOfInt(256);
    MatOfFloat histRange = new MatOfFloat(0f, 256f);
    Imgproc.calcHist(yList, new MatOfInt(0), new Mat(), y_hist, histSize, histRange, false);
    Core.normalize(y_hist, y_hist, 3, 255, Core.NORM_MINMAX);
    calculateCDF(y_hist, y_cdf);
    Core.normalize(y_cdf, y_cdf, 3, 255, Core.NORM_MINMAX);
}
private void transformLight(Mat inputImage, Mat outputImage, Mat ylut) {
    Mat imageYCrCb = new Mat();
    Imgproc.cvtColor(inputImage, imageYCrCb, Imgproc.COLOR_RGB2YCrCb);
    Mat y_chanel = new Mat();
    Core.extractChannel(imageYCrCb, y_chanel, 0);
    Mat cr_chanel = new Mat();
    Core.extractChannel(imageYCrCb, cr_chanel, 1);
    Mat cb_chanel = new Mat();
    Core.extractChannel(imageYCrCb, cb_chanel, 2);
    Core.LUT(y_chanel, ylut, y_chanel);
    ArrayList<Mat> ycrcbDest = new ArrayList<>();
    ycrcbDest.add(y_chanel);
    ycrcbDest.add(cr_chanel);
    ycrcbDest.add(cb_chanel);
    Core.merge(ycrcbDest, outputImage);
    Imgproc.cvtColor(outputImage, outputImage, Imgproc.COLOR_YCrCb2RGB);
}
private static void drawLine(Mat mat, int i, long bin_w, int hist_h, Mat histImage, Scalar color) {
    // bin_w set to 1
    Point p0 = new Point(bin_w * (i - 1), hist_h - Math.round(mat.get(i - 1, 0)[0]));
    Point p1 = new Point(bin_w * (i), hist_h - Math.round(mat.get(i, 0)[0]));
    Imgproc.line(histImage, p0, p1, color, 5, 8, 0);
}
private void drawHistogram(Mat histImage, Mat graph, Scalar color) {
    for (int i = 1; i < 256; i++) {
        drawLine(graph, i, bin_w, histImage.rows(), histImage, color);
    }
}
private void histNCDFtoFile(String filename, Mat hist, Mat cdf, Scalar histColor, Scalar cdfColor) {
    Mat histImage = new Mat(256, 256, CvType.CV_8UC3);
    drawHistogram(histImage, hist, histColor);
    drawHistogram(histImage, cdf, cdfColor);
    saveImage(filename, histImage);
}
private Mat matchHistograms(String input, String target) {
    Mat input_y_hist = new Mat();
    Mat target_y_hist = new Mat();
    calcHistogram(input, input_y_hist, input_y_cdf_mat);
    histNCDFtoFile("inputHistNCDF.jpg", input_y_hist, input_y_cdf_mat, inputHistColor, inputCDFColor);
    calcHistogram(target, target_y_hist, target_y_cdf_mat);
    histNCDFtoFile("targetHistNCDF.jpg", target_y_hist, target_y_cdf_mat, targetHistColor, targetCDFColor);
    Mat ylut = calculateLUT(input_y_cdf_mat, target_y_cdf_mat);
    Mat image;
    Mat dst = new Mat(); // this Mat will hold the transformed image
    image = Imgcodecs.imread(input);
    transformLight(image, dst, ylut);
    return dst;
}
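For completeness, a minimal usage sketch (the file paths and the ImageView are placeholders, and the Mats and colors used above are assumed to be initialized class members):

Mat matched = matchHistograms("/sdcard/input.jpg", "/sdcard/target.jpg");
Bitmap bmp = Bitmap.createBitmap(matched.cols(), matched.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(matched, bmp);
imageView.setImageBitmap(bmp);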
Here is an example image from pixabay that I want to transform:
And this is the image that I use as a target:
And this is the result:
The CDFs of the result and the target are here:
The light green is the target CDF and the light blue is the result CDF.