Android OpenCV eye-tracking by color detection

I have code that detects the face, both eyes, and then the pupil of each eye. Now I want to detect the corners of each eye using white-color detection. As I am new to OpenCV, could you please guide me on how to do this and provide an Android sample? This is the relevant part of my code:
for (int i = 0; i < facesArray.length; i++) {
    Rect r = facesArray[i];
    Core.rectangle(mGray, r.tl(), r.br(), new Scalar(0, 255, 0, 255), 3);
    Core.rectangle(mRgba, r.tl(), r.br(), new Scalar(0, 255, 0, 255), 3);

    eyearea = new Rect(r.x + r.width / 8, (int) (r.y + (r.height / 4.5)),
            r.width - 2 * r.width / 8, (int) (r.height / 3.0));
    Core.rectangle(mRgba, eyearea.tl(), eyearea.br(), new Scalar(255, 0, 0, 255), 2);

    Rect eyearea_right = new Rect(r.x + r.width / 16, (int) (r.y + (r.height / 4.5)),
            (r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
    Rect eyearea_left = new Rect(r.x + r.width / 16 + (r.width - 2 * r.width / 16) / 2,
            (int) (r.y + (r.height / 4.5)),
            (r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
    Core.rectangle(mGray, eyearea_left.tl(), eyearea_left.br(), new Scalar(255, 0, 0, 255), 2);
    Core.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(), new Scalar(255, 0, 0, 255), 2);

    if (learn_frames < 5) {
        // This part detects the pupil of each eye
        teplateR = get_template(mCascadeER, eyearea_right, 20);
        teplateL = get_template(mCascadeEL, eyearea_left, 20);
        learn_frames++;
    } else {
        match_value = match_eye(eyearea_right, teplateR, FdActivity.method);
        match_value = match_eye(eyearea_left, teplateL, FdActivity.method);
    }

    Imgproc.resize(mRgba.submat(eyearea_left), mZoomWindow2, mZoomWindow2.size());
    Imgproc.resize(mRgba.submat(eyearea_right), mZoomWindow, mZoomWindow.size());
}

You need something like Flandmark to detect corners.
See: http://cmp.felk.cvut.cz/~uricamic/flandmark/
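If you still want to experiment with the white-of-the-eye (sclera) idea from the question, here is a rough, untested sketch in the same OpenCV Java API. The helper name findEyeCornersByColor, the eyeRgba argument and the HSV thresholds are all placeholders (not from the post), and the usual org.opencv.core / org.opencv.imgproc imports are assumed:

// Hypothetical helper: pass an RGBA eye ROI, e.g. mRgba.submat(eyearea_left).
// The sclera is bright and unsaturated, so threshold low saturation / high value
// in HSV, then take the extreme left/right points of the white blobs as corner candidates.
private Point[] findEyeCornersByColor(Mat eyeRgba) {
    Mat rgb = new Mat();
    Mat hsv = new Mat();
    Mat mask = new Mat();
    Imgproc.cvtColor(eyeRgba, rgb, Imgproc.COLOR_RGBA2RGB);
    Imgproc.cvtColor(rgb, hsv, Imgproc.COLOR_RGB2HSV);
    // Thresholds are guesses and will need tuning for your lighting.
    Core.inRange(hsv, new Scalar(0, 0, 150), new Scalar(180, 80, 255), mask);
    List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
    Imgproc.findContours(mask, contours, new Mat(),
            Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
    Point left = null, right = null;
    for (MatOfPoint contour : contours) {
        for (Point p : contour.toArray()) {
            if (left == null || p.x < left.x) left = p;
            if (right == null || p.x > right.x) right = p;
        }
    }
    // Coordinates are relative to the ROI; add the ROI offset before drawing on mRgba.
    return (left == null) ? null : new Point[] { left, right };
}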

Related

OpenCV - FloodFill doesn't change mask image

I am trying to use the floodFill algorithm in my Android application. I click a point to select the seed, then I run floodFill. I want to show only the selected area, which has the same color as the seed. To do this, I display the mask Mat object, but it is black every time; it doesn't change. I used the FLOODFILL_FIXED_RANGE and FLOODFILL_MASK_ONLY flags.
My code is here:
Imgproc.cvtColor(mRgba, temp, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(temp, temp, Imgproc.COLOR_RGB2GRAY);
mMask = Mat.zeros(mMask.size(), CvType.CV_8UC1);
Imgproc.floodFill(temp, mMask, fpts.get(fpts.size() - 1), new Scalar(255, 255, 255),
        new Rect(new Point(0, 0), new Point(5, 5)),
        new Scalar(30), new Scalar(30), Imgproc.FLOODFILL_FIXED_RANGE);
Core.circle(temp, fpts.get(fpts.size() - 1), 7, new Scalar(255, 255, 255), RADIUS);
Mat temp3 = new Mat(temp.size(), mMask.type());
temp3 = mMask.submat(new Rect(2, 2, mMask.width() - 2, mMask.height() - 2));
Log.i(TAG, temp3.width() + "-" + temp3.height() + "**" + temp.width() + "-" + temp.height());
// Show me what you got from template matching
Core.rectangle(temp, matchLoc,
        new Point(matchLoc.x + mTemp.cols(), matchLoc.y + mTemp.rows()),
        new Scalar(0, 255, 0));
return temp3;
If I return temp instead, I can see the changes on the input image.
You need to set the new value for the mask in the Imgproc.floodFill flags. To change the mask to white (255):
int flags = 4 + (255 << 8) + Imgproc.FLOODFILL_FIXED_RANGE;
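For reference, a minimal sketch of how those flags plug into the call from the question (seed stands in for fpts.get(fpts.size() - 1); also note that OpenCV requires the mask to be 2 pixels larger than the image in each dimension):

int flags = 4                                // 4-connectivity
        | (255 << 8)                         // value written into the mask
        | Imgproc.FLOODFILL_FIXED_RANGE
        | Imgproc.FLOODFILL_MASK_ONLY;       // optional: leave the input image untouched
mMask = Mat.zeros(temp.rows() + 2, temp.cols() + 2, CvType.CV_8UC1);
Imgproc.floodFill(temp, mMask, seed, new Scalar(255),
        new Rect(), new Scalar(30), new Scalar(30), flags);
// mMask should now contain 255 inside the filled region.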

How to return vectors lists (vector<Point2f>) from JNI to Java?

I am new to Android + OpenCV + JNI development. I am trying to find the largest contour.
Here is my native code:
JNIEXPORT jint JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindSquares(
        JNIEnv* env, jobject, jlong addrRgba, jint draw) {
    Mat& image = *(Mat*) addrRgba;
    int thresh = 50, N = 4;
    int found = 0;
    Mat pyr, timg, gray0(image.size(), CV_8U), gray;
    pyrDown(image, pyr, Size(image.cols / 2, image.rows / 2));
    pyrUp(pyr, timg, image.size());
    vector< vector<Point> > contours;

    // find squares in every color plane of the image
    for (int c = 1; c < 3; c++) {
        int ch[] = { c, 0 };
        mixChannels(&timg, 1, &gray0, 1, ch, 1);

        // try several threshold levels
        for (int l = 0; l < N; l++) {
            // hack: use Canny instead of zero threshold level.
            // Canny helps to catch squares with gradient shading
            if (l == 0) {
                // apply Canny. Take the upper threshold from slider
                // and set the lower to 0 (which forces edges merging)
                Canny(gray0, gray, 0, thresh, 5);
                // dilate canny output to remove potential
                // holes between edge segments
                dilate(gray, gray, Mat(), Point(-1, -1));
            } else {
                // apply threshold if l != 0:
                // tgray(x,y) = gray(x,y) < (l+1)*255/N ? 255 : 0
                gray = gray0 >= (l + 1) * 255 / N;
            }

            // find contours and store them all as a list
            findContours(gray, contours, CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE);

            vector<Point> approx;
            // test each contour
            for (size_t i = 0; i < contours.size(); i++) {
                // approximate contour with accuracy proportional
                // to the contour perimeter
                approxPolyDP(Mat(contours[i]), approx,
                             arcLength(Mat(contours[i]), true) * 0.02, true);

                // square contours should have 4 vertices after approximation,
                // relatively large area (to filter out noisy contours)
                // and be convex.
                // Note: absolute value of an area is used because
                // area may be positive or negative - in accordance with the
                // contour orientation
                if (approx.size() == 4 && fabs(contourArea(Mat(approx))) > 1000
                        && isContourConvex(Mat(approx))) {
                    double maxCosine = 0;
                    for (int j = 2; j < 5; j++) {
                        // find the maximum cosine of the angle between joint edges
                        double cosine = fabs(angle(approx[j % 4], approx[j - 2], approx[j - 1]));
                        maxCosine = MAX(maxCosine, cosine);
                    }

                    // if cosines of all angles are small
                    // (all angles are ~90 degree) then write quandrange
                    // vertices to resultant sequence
                    if (maxCosine < 0.3) {
                        circle(image, approx[0], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[1], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[2], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        circle(image, approx[3], 5, Scalar(255, 0, 0, 255), 3, 4, 0);
                        //rectangle(image, approx[0], approx[2], Scalar(0,255,0,255), 5, 4, 0);

                        // Center of this rectangle
                        int x = (int) ((approx[0].x + approx[1].x + approx[2].x + approx[3].x) / 4.0);
                        int y = (int) ((approx[0].y + approx[1].y + approx[2].y + approx[3].y) / 4.0);

                        if ((int) draw) {
                            // outline
                            line(image, approx[0], approx[1], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[1], approx[2], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[2], approx[3], Scalar(0, 255, 0, 255), 1, 4, 0);
                            line(image, approx[3], approx[0], Scalar(0, 255, 0, 255), 1, 4, 0);
                        }

                        /// Get the moments
                        vector<Moments> mu(contours.size());
                        for (int i = 0; i < contours.size(); i++) {
                            mu[i] = moments(contours[i], false);
                        }

                        /// Get the mass centers:
                        vector<Point2f> mc(contours.size());
                        for (int i = 0; i < contours.size(); i++) {
                            mc[i] = Point2f(mu[i].m10 / mu[i].m00, mu[i].m01 / mu[i].m00);
                        }

                        /// Draw contours
                        for (int i = 0; i < contours.size(); i++) {
                            circle(image, mc[i], 10, Scalar(255, 0, 0), 1, CV_AA, 0);
                        }

                        found = 1;
                        jint result = (jint) found;
                        return result;
                    }
                }
            }
        }
    }
    jint result = (jint) found;
    return result;
}
And I am calling this native method from Java code as:
public native void FindFeatures(long matAddrGr, long matAddrRgba);
Everything works fine for me; I am returning an int from the native code to the Java code. My question is how I can return a vector (the 'mc' object in my method) from native code to Java. Please help me find a way to return vectors of points from JNI to Java.
vector<Point2f> corresponds to MatOfPoint2f in Java, which is a subclass of Mat.
Use the vector-to-Mat conversion function from the OpenCV repository's Java converters:
void vector_Point2f_to_Mat(vector<Point2f>& v_point, Mat& mat)
{
    mat = Mat(v_point, true);
}
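On the Java side you can then allocate a Mat, pass its native address to JNI, and wrap the result in MatOfPoint2f. A sketch, assuming the native function were extended with an extra jlong parameter for the output Mat (not part of the code above) and filled it with vector_Point2f_to_Mat(mc, *(Mat*) addrCenters):

// Java side (FindSquares with a third argument is a hypothetical variant of the method above)
Mat centers = new Mat();
FindSquares(mRgba.getNativeObjAddr(), 1, centers.getNativeObjAddr());
MatOfPoint2f mc = new MatOfPoint2f(centers);  // wraps cleanly because the Mat is CV_32FC2
Point[] massCenters = mc.toArray();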

OpenCV Core.add error

I have a problem with this method of the OpenCV library...
Imgproc.cvtColor(image, image, Imgproc.COLOR_RGBA2RGB);
Mat prob_fgd = new Mat(1, 1, CvType.CV_8U, new Scalar(Imgproc.GC_PR_FGD));
try {
    Imgproc.grabCut(image, firstMask, rect, bgModel, fgModel, 3, 0);
} catch (Exception w) {
    System.out.println(w.getMessage());
}
Core.compare(firstMask, prob_fgd, firstMask, Core.CMP_EQ);
foreground = new Mat(image.size(), CvType.CV_8UC3, new Scalar(255, 255, 255));
image.copyTo(foreground, firstMask);
Imgproc.resize(background, background, image.size());
mask = new Mat(image.size(), CvType.CV_8UC1, new Scalar(100, 255, 100));
foreground = overlay_colored_roi(foreground, new Scalar(100, 255, 100));
Imgproc.cvtColor(foreground, mask, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(mask, mask, 254, 255, Imgproc.THRESH_BINARY_INV);
mask.copyTo(ref);
vals = new Mat(1, 1, CvType.CV_8UC3, new Scalar(0.0));
background.copyTo(dst);
background.setTo(vals, mask);
The code works up to this point and stops at the next line. The log says the inputs to Core.add must have the same size, but background.size(), foreground.size(), dst.size() and mask.size() are all equal.
Core.add(background, foreground, dst, mask);
They must also have the same number of channels. Since the initialization of background is not shown in the code, I'm assuming that is the problem. Secondly, try the plain addition overload (without the mask) and check the output. If the problem still persists, post the full code. Hope this helps.
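A quick diagnostic sketch along those lines (it reuses the Mats from the question; the RGBA-to-RGB conversion is only one example of how a channel mismatch could be fixed, depending on what background actually contains):

Log.d(TAG, "bg " + background.size() + " x" + background.channels()
        + ", fg " + foreground.size() + " x" + foreground.channels()
        + ", mask " + mask.size() + " x" + mask.channels());
if (background.channels() != foreground.channels()) {
    // e.g. an RGBA background next to the CV_8UC3 foreground
    Imgproc.cvtColor(background, background, Imgproc.COLOR_RGBA2RGB);
}
Core.add(background, foreground, dst);        // first without the mask...
Core.add(background, foreground, dst, mask);  // ...then with it (mask must be 8-bit, single channel)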

How to detect the eye pupil circularly in OpenCV

I am working with OpenCV on Android and I want to change the eye pupil color through the Hue channel, which I have already achieved. The problem is that the region I detect is a rectangle, while the pupil is a circular region. Kindly help me make the detected region circular.
private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
    Mat template = new Mat();
    Mat mROI = mGray.submat(area);
    MatOfRect eyes = new MatOfRect();
    Point iris = new Point();
    Rect eye_template = new Rect();
    clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
            Objdetect.CASCADE_FIND_BIGGEST_OBJECT | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(30, 30), new Size());
    Rect[] eyesArray = eyes.toArray();
    for (int i = 0; i < eyesArray.length; i++) {
        Rect e = eyesArray[i];
        e.x = area.x + e.x;
        e.y = area.y + e.y;
        Rect eye_only_rectangle = new Rect((int) e.tl().x, (int) (e.tl().y + e.height * 0.4),
                (int) e.width, (int) (e.height * 0.6));
        mROI = mGray.submat(eye_only_rectangle);
        Mat vyrez = mRgba.submat(eye_only_rectangle);
        Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);
        Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
        iris.x = mmG.minLoc.x + eye_only_rectangle.x;
        iris.y = mmG.minLoc.y + eye_only_rectangle.y;
        eye_template = new Rect((int) iris.x - size / 2, (int) iris.y - size / 2, size, size);
        Core.rectangle(mRgba, eye_template.tl(), eye_template.br(), new Scalar(255, 0, 0, 255), 2);
        template = (mGray.submat(eye_template)).clone();
        return template;
    }
    return template;
}
Some potential solutions:
- The simplest, although it might not be very robust, is to calculate the inscribed circle (the circle bounded by the rectangle) and change its color. If your pupil detection is very accurate, this may work fine; a rough sketch of this option follows below.
- A more robust solution would be to detect the area of the pupil based on color or gradient (edge detection).
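For the first option, a minimal sketch that reuses eye_template and mRgba from the question (the target hue and the mask-based copy are illustrative, not the original author's code):

Mat pupil = mRgba.submat(eye_template);
Mat circleMask = Mat.zeros(pupil.size(), CvType.CV_8UC1);
Point center = new Point(pupil.cols() / 2.0, pupil.rows() / 2.0);
int radius = Math.min(pupil.cols(), pupil.rows()) / 2;
Core.circle(circleMask, center, radius, new Scalar(255), -1);  // filled inscribed circle

// Build a hue-shifted copy of the pupil patch, then copy it back only inside the circle.
Mat rgb = new Mat();
Mat hsv = new Mat();
Imgproc.cvtColor(pupil, rgb, Imgproc.COLOR_RGBA2RGB);
Imgproc.cvtColor(rgb, hsv, Imgproc.COLOR_RGB2HSV);
List<Mat> channels = new ArrayList<Mat>();
Core.split(hsv, channels);
channels.get(0).setTo(new Scalar(145));        // example target hue
Core.merge(channels, hsv);
Imgproc.cvtColor(hsv, rgb, Imgproc.COLOR_HSV2RGB);
Mat recolored = new Mat();
Imgproc.cvtColor(rgb, recolored, Imgproc.COLOR_RGB2RGBA);
recolored.copyTo(pupil, circleMask);           // corners of the rectangle stay untouched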

Haar cascade for eyeball in OpenCV Android

I am working on an OpenCV eye detection project and I have successfully detected the rectangular region of both eyes with the help of a Haar cascade for eyes. Now I want to detect the eyeballs inside both eyes; the problem is that I have no Haar cascade for eyeball tracking. Kindly help me if any of you have this XML, or suggest another solution.
Here is my eye detection code:
private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
    Mat eye = new Mat();
    Mat template = new Mat();
    Mat mROI = mGray.submat(area);
    MatOfRect eyes = new MatOfRect();
    Point iris = new Point();
    Rect eye_template = new Rect();
    clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
            Objdetect.CASCADE_FIND_BIGGEST_OBJECT | Objdetect.CASCADE_SCALE_IMAGE,
            new Size(30, 30), new Size());
    Rect[] eyesArray = eyes.toArray();
    for (int i = 0; i < eyesArray.length; i++) {
        Rect e = eyesArray[i];
        e.x = area.x + e.x;
        e.y = area.y + e.y;
        Core.rectangle(mROI, e.tl(), e.br(), new Scalar(25, 50, 0, 255));
        Rect eye_only_rectangle = new Rect((int) e.tl().x, (int) (e.tl().y + e.height * 0.4),
                (int) e.width, (int) (e.height * 0.6));
        // reduce ROI
        mROI = mGray.submat(eye_only_rectangle);
        Mat vyrez = mRgba.submat(eye_only_rectangle);
        Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);
        // Draw pink circle on eyeball
        int radius = vyrez.height() / 2;
        // Core.circle(vyrez, mmG.minLoc, 2, new Scalar(0, 255, 0, 1), radius);
        // Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 0, 255), 1);
        iris.x = mmG.minLoc.x + eye_only_rectangle.x;
        iris.y = mmG.minLoc.y + eye_only_rectangle.y;
        eye_template = new Rect((int) iris.x - size / 2, (int) iris.y - size / 2, size, size);
        // draw red rectangle around eyeball
        // Core.rectangle(mRgba, eye_template.tl(), eye_template.br(), new Scalar(255, 0, 0, 255), 2);
        eye = (mRgba.submat(eye_only_rectangle));
        template = (mGray.submat(eye_template)).clone();
        // return template;

        Mat eyeball_HSV = new Mat();
        Mat dest = new Mat();
        // Mat eye = new Mat();
        // eye = mRgba.submat(eye_only_rectangle);
        List<Mat> hsv_channel = new ArrayList<Mat>();
        // convert image to HSV
        Imgproc.cvtColor(eye, eyeball_HSV, Imgproc.COLOR_RGB2HSV, 0);
        // get the HSV channels:
        // hsv_channel[0] is hue
        // hsv_channel[1] is saturation
        // hsv_channel[2] is value (brightness)
        Core.split(eyeball_HSV, hsv_channel);
        try {
            hsv_channel.get(0).setTo(new Scalar(145));
            Log.v(TAG, "Got the Channel!");
        } catch (Exception ex) {
            ex.printStackTrace();
            Log.v(TAG, "Didn't get any channel");
        }
        Core.merge(hsv_channel, eyeball_HSV);
        Imgproc.cvtColor(eyeball_HSV, dest, Imgproc.COLOR_HSV2RGB);
        Imgproc.cvtColor(dest, eye, Imgproc.COLOR_RGB2RGBA);
    }
    return eye;
}
If you are willing to consider solutions other than Haar cascades, you can use facial landmark detection code. Facial landmark packages can give the location of the eyes in the image (usually the center of the eye and its left and right borders).
Examples of landmark detection packages:
- STASM: http://www.milbo.users.sonic.net/stasm/
- Flandmark detector: http://cmp.felk.cvut.cz/~uricamic/flandmark/
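If a cascade or landmark package is not an option, another rough possibility (not from the original answer) is to run HoughCircles on the blurred grayscale eye region; the eyeball/iris usually shows up as the strongest circle. The parameters below are guesses to tune, and the names reuse the question's code:

Mat eyeGray = mGray.submat(eye_only_rectangle).clone();
Imgproc.GaussianBlur(eyeGray, eyeGray, new Size(5, 5), 2);
Mat circles = new Mat();
Imgproc.HoughCircles(eyeGray, circles, Imgproc.CV_HOUGH_GRADIENT, 1,
        eyeGray.rows() / 4, 100, 20, eyeGray.rows() / 8, eyeGray.rows() / 2);
for (int i = 0; i < circles.cols(); i++) {
    double[] c = circles.get(0, i);
    Point center = new Point(eye_only_rectangle.x + c[0], eye_only_rectangle.y + c[1]);
    Core.circle(mRgba, center, (int) c[2], new Scalar(255, 0, 255, 255), 2);
}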
