I'm using OpenCV 3.0.0 in Android Studio, and I need to convert an RGB image to YIQ, which means doing some per-channel add and subtract equations. I used OpenCV's Core.split to take the red, green, and blue channels from the RGB image, then used these channels to calculate the YIQ image with these equations: Y = 0.299·R + 0.587·G + 0.114·B, I = 0.211·R − 0.523·G + 0.312·B, Q = 0.596·R − 0.274·G − 0.322·B. After testing my app I got this error: A/libc: Fatal signal 11 (SIGSEGV), code 1 (SEGV_MAPERR), fault addr 0x19b in tid 27631 (orstackoverflow), pid 27631 (orstackoverflow)
And this is my code:
public class MainActivity extends AppCompatActivity {
private static final String TAG = "3:qinQctivity";
Button button;
ImageView imageView;
ArrayList<Mat> RGB = new ArrayList<Mat>(3);
ArrayList<Mat> YIQ = new ArrayList<Mat>(3);
Mat newImage;
Mat Blue,Green,Red,I,Y,Q,B,X,D,W;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
button = findViewById(R.id.button);
imageView = findViewById(R.id.image);
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View view) {
if (!OpenCVLoader.initDebug()) {
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_0_0, getApplicationContext(), baseLoaderCallback);
} else {
baseLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
});
}
BaseLoaderCallback baseLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
super.onManagerConnected(status);
if (status == LoaderCallbackInterface.SUCCESS) {
try {
Y = new Mat();
Q = new Mat();
I = new Mat();
X = new Mat();
newImage = Utils.loadResource(getApplicationContext(), R.drawable.retinalimage, CvType.CV_32FC3);
Core.split(newImage, RGB);
Blue = RGB.get(0);
Red = RGB.get(1);
Green = RGB.get(2);
B = new Mat(); // result
D = new Mat(); // result
W = new Mat(); // result
/*working on Y channel*/
Scalar alpha_Y = new Scalar(0.299); // the factor
Scalar alpha1_Y = new Scalar(0.587); // the factor
Scalar alpha2_Y = new Scalar(0.114); // the factor
Core.multiply(Red,alpha_Y, B);
Core.multiply(Green,alpha1_Y,D);
Core.multiply(Blue,alpha2_Y,W);
Core.add(B,D,Y);
Log.i(TAG, "onManagerConnected: "+ Y.toString());
Core.add(Y,W,Y);
/*I = 0.211 * Red - 0.523 * Green + 0.312 * Blue;*/
Mat Z = new Mat(); // result
Mat P = new Mat(); // result
Mat O = new Mat(); // result
/*working on I channel*/
Scalar alpha_I = new Scalar(0.211); // the factor
Scalar alpha1_I = new Scalar(0.523); // the factor
Scalar alpha2_I = new Scalar(0.312); // the factor
Core.multiply(Red,alpha_I,Z);
Core.multiply(Green,alpha1_I,P);
Core.multiply(Blue,alpha2_I,O);
Core.add(Z,P,I);
Core.add(I,O,I);
/*working on Q channel*/
/*Q = 0.596 * Red - 0.274 * Green - 0.322 * Blue;*/
Mat V = new Mat();
Mat W = new Mat();
Mat N = new Mat();
Scalar alpha_Q = new Scalar(0.596); // the factor
Scalar alpha1_Q = new Scalar(0.274); // the factor
Scalar alpha2_Q = new Scalar(0.322); // the factor
Core.multiply(Red,alpha_Q,V);
Core.multiply(Green,alpha1_Q,W);
Core.multiply(Blue,alpha2_Q,N);
Core.subtract(V,W,Q);
Core.subtract(Q,N,Y);
YIQ.add(Y);
YIQ.add(I);
YIQ.add(Q);
Core.merge(YIQ,X);
showImage(X);
} catch(IOException e){
e.printStackTrace();
}
}
}
};
void showImage (Mat y){
Bitmap bm = Bitmap.createBitmap(y.width(), y.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(y, bm);
imageView.setImageBitmap(bm);
}
}
You are not allocating memory for the Mats B, D, W, Z, P, O, .... You need to get the size of the original RGB matrices and pass that size to the new Mat constructors. According to the OpenCV documentation for the multiply function:
Parameters:
src1 - First source array.
src2 - Second source array of the same size and the same type as src1.
dst - Destination array of the same size and type as src1.
(highlights mine)
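A minimal sketch of that suggestion, reusing the question's variable names (the CV_32FC1 channel type is an assumption that follows from loading the image as CV_32FC3 and splitting it):
// Allocate each destination Mat with the size and type of the split channels;
// after Core.split on a CV_32FC3 image, each channel is CV_32FC1.
Size size = Red.size();
Mat B = new Mat(size, CvType.CV_32FC1);
Mat D = new Mat(size, CvType.CV_32FC1);
Mat W = new Mat(size, CvType.CV_32FC1);
Core.multiply(Red, new Scalar(0.299), B);
Core.multiply(Green, new Scalar(0.587), D);
Core.multiply(Blue, new Scalar(0.114), W);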
I am trying to implement paper detection with OpenCV. I understand the concept of how to get it:
Input -> Canny -> Blur -> Find Contours -> Search for a (closed) quadrilateral -> Draw Contour
but I am still new to OpenCV programming, so I am having issues implementing it. I found help through this answer:
Android OpenCV Paper Sheet detection
but it draws a contour on every possible line. Here is the code I am trying to implement.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
Imgproc.drawContours(mRgba,findContours(mRgba), 0, new Scalar(0 , 255, 0), 5);
return mRgba;
}
public static class Quadrilateral {
public MatOfPoint contour;
public Point[] points;
public Quadrilateral(MatOfPoint contour, Point[] points) {
this.contour = contour;
this.points = points;
}
}
public static Quadrilateral findDocument( Mat inputRgba ) {
ArrayList<MatOfPoint> contours = findContours(inputRgba);
Quadrilateral quad = getQuadrilateral(contours);
return quad;
}
private static ArrayList<MatOfPoint> findContours(Mat src) {
double ratio = src.size().height / 500;
int height = Double.valueOf(src.size().height / ratio).intValue();
int width = Double.valueOf(src.size().width / ratio).intValue();
Size size = new Size(width,height);
Mat resizedImage = new Mat(size, CvType.CV_8UC4);
Mat grayImage = new Mat(size, CvType.CV_8UC4);
Mat cannedImage = new Mat(size, CvType.CV_8UC1);
Imgproc.resize(src,resizedImage,size);
Imgproc.cvtColor(resizedImage, grayImage, Imgproc.COLOR_RGBA2GRAY, 4);
Imgproc.GaussianBlur(grayImage, grayImage, new Size(5, 5), 0);
Imgproc.Canny(grayImage, cannedImage, 75, 200);
ArrayList<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(cannedImage, contours, hierarchy, Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
hierarchy.release();
Collections.sort(contours, new Comparator<MatOfPoint>() {
@Override
public int compare(MatOfPoint lhs, MatOfPoint rhs) {
return Double.valueOf(Imgproc.contourArea(rhs)).compareTo(Imgproc.contourArea(lhs));
}
});
resizedImage.release();
grayImage.release();
cannedImage.release();
return contours;
}
private static Quadrilateral getQuadrilateral(ArrayList<MatOfPoint> contours) {
for ( MatOfPoint c: contours ) {
MatOfPoint2f c2f = new MatOfPoint2f(c.toArray());
double peri = Imgproc.arcLength(c2f, true);
MatOfPoint2f approx = new MatOfPoint2f();
Imgproc.approxPolyDP(c2f, approx, 0.02 * peri, true);
Point[] points = approx.toArray();
// select biggest 4 angles polygon
if (points.length == 4) {
Point[] foundPoints = sortPoints(points);
return new Quadrilateral(c, foundPoints);
}
}
return null;
}
private static Point[] sortPoints(Point[] src) {
ArrayList<Point> srcPoints = new ArrayList<>(Arrays.asList(src));
Point[] result = { null , null , null , null };
Comparator<Point> sumComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y + lhs.x).compareTo(rhs.y + rhs.x);
}
};
Comparator<Point> diffComparator = new Comparator<Point>() {
@Override
public int compare(Point lhs, Point rhs) {
return Double.valueOf(lhs.y - lhs.x).compareTo(rhs.y - rhs.x);
}
};
// top-left corner = minimal sum
result[0] = Collections.min(srcPoints, sumComparator);
// bottom-right corner = maximal sum
result[2] = Collections.max(srcPoints, sumComparator);
// top-right corner = minimal difference
result[1] = Collections.min(srcPoints, diffComparator);
// bottom-left corner = maximal difference
result[3] = Collections.max(srcPoints, diffComparator);
return result;
}
The answer suggests that I should use the Quadrilateral object and call Imgproc.drawContours() with it, but this function takes an ArrayList as an argument, whereas the Quadrilateral object contains a MatOfPoint and a Point[]. Can someone help me through this? I am using OpenCV (3.3) and Android (1.5.1).
Here is a sample of what it should look like:
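One possible way to bridge the type mismatch (a sketch built on the question's own class, not a tested answer): drawContours() takes a list of contours, so wrap the single MatOfPoint held by the Quadrilateral in a one-element list.
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
    mRgba = inputFrame.rgba();
    Quadrilateral quad = findDocument(mRgba);
    if (quad != null) {
        // drawContours() expects a List<MatOfPoint>: wrap the single contour
        List<MatOfPoint> document = new ArrayList<>();
        document.add(quad.contour);
        Imgproc.drawContours(mRgba, document, -1, new Scalar(0, 255, 0), 5);
    }
    return mRgba;
}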
Since I use the back camera to capture frames and the Android application is landscape by default, I flip the input frame with:
Core.flip(currentFrame, currentFrame, 1); // flip around Y-axis
After some image enhancement and findContours using OpenCV, I have the following problems:
a. Object moves to the left, the drawn circle moves downward.
b. Object moves to the right, the drawn circle moves upward.
c. Object moves upward, the drawn circle moves to the left.
d. Object moves downward, the drawn circle moves to the right.
In other words, the drawn output circle needs to be rotated 90° clockwise to match the source image.
Code shown as follows:
package com.mtyiuaa.writingintheair;
import android.support.v7.app.AppCompatActivity;
import android.os.Bundle;
import android.util.Log;
import android.view.SurfaceView;
import android.view.View;
import android.content.Intent;
import android.view.ViewDebug;
import android.widget.Button;
import java.util.ArrayList;
import java.util.List;
import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.JavaCameraView;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.Point;
import org.opencv.core.MatOfPoint;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Scalar;
import org.opencv.core.Size;
import org.opencv.core.Rect;
import org.opencv.imgproc.Imgproc;
import org.opencv.imgproc.Moments;
import org.opencv.highgui.VideoCapture;
public class MainActivity extends AppCompatActivity implements CameraBridgeViewBase.CvCameraViewListener2{
private static final int THRESH_BINARY = 1;
private static final int THRESH_TOZERO = 4;
private static String TAG = "MainActivity";
JavaCameraView javaCameraView;
JavaCameraView javaCameraView2;
VideoCapture videoCapture;
Mat mRgba;
Mat temp;
Mat previousFrame;
Mat GpreviousFrame; // gray-level frame of previous Frame
Mat currentFrame;
Mat GcurrentFrame; // gray-level frame of current Frame
Mat diffFrame;
Mat imgGray;
Mat imgHSV;
Mat imgCanny;
Mat inputFrame;
Mat FlipFrame;
Mat outputFrame;
Mat imgthresholding;
Mat imgNormalization;
Mat imgGaussianSmothing;
int max_Binary_value = 255;
int thresh = 20;
Boolean CameraActive;
Boolean firstIteration= true;
int[] theObject = {0,0};
int x=0, y=0;
int FRAME_WIDTH = 1280;
int FRAME_HEIGHT = 720;
//max number of objects to be detected in frame
int MAX_NUM_OBJECTS=50;
//Minimum and Maximum object area
int MIN_OBJECT_AREA = 20*20;
int MAX_OBJECT_AREA = (int) ((FRAME_HEIGHT*FRAME_WIDTH)/1.5);
//MatOfPoint allcontours = new MatOfPoint();
//bounding rectangle of the object, we will use the center of this as its position.
BaseLoaderCallback mLoaderCallBack = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch(status){
case BaseLoaderCallback.SUCCESS:{
javaCameraView.enableView();
//javaCameraView2.enableView();
break;
}
default:{
super.onManagerConnected(status);
break;
}
}
}
};
static{
}
//JavaCameraView javaCameraView;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
javaCameraView = (JavaCameraView)findViewById(R.id.java_camera_view);
javaCameraView.setVisibility(SurfaceView.VISIBLE);
javaCameraView.setCvCameraViewListener(this);
}
@Override
protected void onPause(){
super.onPause();
if(javaCameraView!=null) {
CameraActive = false;
javaCameraView.disableView();
}
}
@Override
protected void onDestroy(){
super.onDestroy(); // call the basic function
if(javaCameraView!=null){
javaCameraView.disableView();
}
}
@Override
protected void onResume(){
super.onResume(); //call based class
if(OpenCVLoader.initDebug()){
Log.i(TAG, "OpenCV loaded successfully");
mLoaderCallBack.onManagerConnected(LoaderCallbackInterface.SUCCESS);
//grab a new instance by using Basecallbackloader
}
else {
Log.i(TAG, "OpenCV not loaded");
//recall opencvLoader if not loaded
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_10, this, mLoaderCallBack);
}
}
@Override
public void onCameraViewStarted(int width, int height) {
//Mat::Mat(int rows, int cols, int type)
// initialize all Mat object when onCamera starts
CameraActive = true;
// 4 channels are used
mRgba = new Mat(height, width, CvType.CV_8SC4);
FlipFrame = new Mat(height, width, CvType.CV_8SC4);
previousFrame =new Mat(height, width, CvType.CV_8SC4);
currentFrame = new Mat(height, width, CvType.CV_8SC4);
diffFrame =new Mat(height, width, CvType.CV_8SC4);
// 1 channel is used.
GcurrentFrame = new Mat(height, width, CvType.CV_8SC1);
GpreviousFrame = new Mat(height, width, CvType.CV_8SC1);
imgGray= new Mat(height, width, CvType.CV_8SC1);
imgHSV = new Mat (height, width, CvType.CV_8SC1);
imgCanny = new Mat(height, width, CvType.CV_8SC1);
imgGaussianSmothing = new Mat(height, width, CvType.CV_8SC1);
imgthresholding = new Mat(height, width, CvType.CV_8SC1);
imgNormalization = new Mat(height,width, CvType.CV_8SC1);
inputFrame = new Mat(height, width, CvType.CV_8SC1);
outputFrame = new Mat(height, width, CvType.CV_8SC1);
temp = new Mat(height, width, CvType.CV_8SC1);
}
@Override
public void onCameraViewStopped() {
mRgba.release();
FlipFrame.release();
previousFrame.release();
currentFrame.release();
diffFrame.release();
GcurrentFrame.release();
GpreviousFrame.release();
imgGray.release();
imgHSV.release();
imgCanny.release();
imgGaussianSmothing.release();
imgthresholding.release();
imgNormalization.release();
inputFrame.release();
outputFrame.release();
temp.release();
CameraActive = false;
}
@Override
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
while(CameraActive) {
Mat temp2 = new Mat();
Mat temp3 = new Mat();
currentFrame = inputFrame.rgba();
Core.flip(currentFrame, currentFrame, 1);//flip around Y-axis
RGB2HSV(currentFrame).copyTo(temp2);
FilterHSVImage(temp2).copyTo(temp2);
//CannyDetector(temp2).copyTo(temp4);
MorphOperation(temp2).copyTo(temp2);
List<MatOfPoint> contours = new ArrayList<>();
Mat hierarchy = new Mat();
Imgproc.findContours(temp2,contours,hierarchy,Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
temp2.copyTo(temp3);
FindLargestContours(temp3, contours);
//return outputFrame;
}
return null;
}
// Edge Detector using Canny
// Goal: Edge image is less sensitive to lighting condition
public Mat CannyDetector(Mat inputFrame) {
Imgproc.Canny(inputFrame, imgCanny, 50, 150);
return imgCanny;
}
private Mat RGB2Gray (Mat inputFrame){
Imgproc.cvtColor(inputFrame, imgGray, Imgproc.COLOR_RGB2GRAY);
return imgGray;
}
private Mat RGB2HSV (Mat inputFrame){
Imgproc.cvtColor(inputFrame, imgHSV, Imgproc.COLOR_RGB2HSV);
return imgHSV;
}
private Mat FilterHSVImage(Mat inputFrame){
Core.inRange(inputFrame, new Scalar(0, 100, 100), new Scalar(10, 255, 255), imgthresholding);
//Core.inRange(temp2, new Scalar(160, 100, 100), new Scalar(179, 255, 255), temp2);
return imgthresholding;
}
private Mat MorphOperation (Mat inputFrame){
//Mat element1 = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(2*dilation_size + 1, 2*dilation_size+1));
//Imgproc.dilate(source, destination, element1);
//Highgui.imwrite("dilation.jpg", destination);
Mat erodeElement =Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size(3,3));
Mat dilateElement = Imgproc.getStructuringElement(Imgproc.MORPH_RECT, new Size (8,8));
Imgproc.dilate(inputFrame, inputFrame, erodeElement);
Imgproc.dilate(inputFrame, inputFrame, erodeElement);
Imgproc.erode(inputFrame, inputFrame, dilateElement);
Imgproc.erode(inputFrame, inputFrame, dilateElement);
return inputFrame;
}
private Mat Threshold(Mat inputFrame){
Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, Imgproc.THRESH_TOZERO);
return imgthresholding;
}
private Mat ThresholdToBinary(Mat inputFrame){
Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, Imgproc.THRESH_BINARY);
//Imgproc.threshold(inputFrame, imgthresholding, thresh, max_Binary_value, THRESH_BINARY);
return imgthresholding;
}
private Mat Normalization(Mat inputFrame, double min, double max){
//double E_Max =
Core.normalize(inputFrame, imgNormalization, min, max, Core.NORM_MINMAX);
return imgNormalization;
}
private Mat drawObject(int x, int y, Mat inputFrame) {
Point point = new Point(x, y);
Point pointA = new Point(x, y - 25);
Point pointB = new Point(x, y + 25);
Point pointC = new Point(x - 25, y);
Point pointD = new Point(x + 25, y);
Scalar scalar = new Scalar(255, 0, 0);
Core.circle(inputFrame,point,20,scalar,2);
if(y-25>0) Core.line(inputFrame,point,pointA,scalar,2);
else Core.line(inputFrame,point,new Point(x,0),scalar,2);
if(y+25<FRAME_HEIGHT) Core.line(inputFrame,point,pointB,scalar,2);
else Core.line(inputFrame,point,new Point(x,FRAME_HEIGHT),scalar,2);
if(x-25>0)Core.line(inputFrame,point,pointC,scalar,2);
else Core.line(inputFrame,point,new Point(0,y),scalar,2);
if(x+25<FRAME_WIDTH) Core.line(inputFrame,point,pointD,scalar,2);
else Core.line(inputFrame,point,new Point(FRAME_WIDTH,y),scalar,2);
Core.putText(inputFrame, "Tracking object at (" + Integer.toString(x)+" , "+ Integer.toString(y)+ ")",point, 1, 1,scalar, 2);
// putText(inputFrame,intToString(x)+","+intToString(y),Point(x,y+30),1,1,Scalar(0,255,0),2);
Log.i(TAG, "Draw x at "+Integer.toString(x)+ " Draw y at "+ Integer.toString(y));
inputFrame.copyTo(outputFrame);
return outputFrame;
}
private void TrackFilteredObject (int x, int y, Mat filteredImage, Mat sourceImage){
boolean objectFound = false;
Mat temp3 = new Mat();
filteredImage.copyTo(temp3);
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
Mat hierarchy = new Mat();
Imgproc.findContours(temp3,contours,hierarchy,Imgproc.RETR_CCOMP, Imgproc.CHAIN_APPROX_SIMPLE);
//Point[] contourPoints = (Point[]) contours.toArray();
double refArea = 0;
if (hierarchy.size().height>0 && hierarchy.size().width>0){
// int numObjects = hierarchy.size();
//if number of objects greater than MAX_NUM_OBJECTS we have a noisy filter
//if(numObjects<MAX_NUM_OBJECTS) {
for (int index = 0; index >= 0; index =(int)hierarchy.get(index,0)[0]){
//hierarchy[index][0]) {
Moments moment = Imgproc.moments(contours.get(index), true);
double area = moment.get_m00();
//if the area is less than 20 px by 20px then it is probably just noise
//if the area is the same as the 3/2 of the image size, probably just a bad filter
//we only want the object with the largest area so we save a reference area each
//iteration and compare it to the area in the next iteration.
if (area > MIN_OBJECT_AREA && area < MAX_OBJECT_AREA && area > refArea) {
// x = moment.m10 / area;
x= (int) (moment.get_m10()/area);
y = (int) (moment.get_m01()/area);
objectFound = true;
refArea = area;
} else objectFound = false;
}
//}
}
}
}
Replace x with y (and y with x): the camera frame is rotated 90° relative to the display, so the centroid coordinates computed from the moments come out transposed. It's pretty simple.
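A minimal sketch of that fix inside the question's TrackFilteredObject (assuming the swap is done where the centroid is computed from the moments):
// Transpose the centroid: the frame is rotated 90 degrees relative to
// the display, so m10/m00 and m01/m00 land on swapped screen axes.
x = (int) (moment.get_m01() / area); // was get_m10()
y = (int) (moment.get_m10() / area); // was get_m01()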
I have a piece of code that removes a bitmap's background. At first the code took about 40 s to remove each bitmap's background, but I optimized it to about 12 s on a Huawei Y320-U30 (quad-core, 1.3 GHz) and 6 s on a Galaxy S3 (quad-core). Although this is a big improvement over the first version, it is not good enough: I want it in the range of 2 s to 3 s on the Huawei, because the typical user will probably have a low-end device. I am using an AsyncTask with a fixed thread pool of 2 and a thread priority of Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND +
Process.THREAD_PRIORITY_MORE_FAVORABLE). I know multithreading is the ability to run multiple tasks in a single time frame, but isn't there a way I can run 2 or 3 threads on one task? For example, in my case, run the threads on my background-removal code. I am sure this could yield better performance, because at the time this code runs there is no other thread in my app running; all resources belong to my code at run time. I looked online but cannot find anything related to that. Below is my code:
public class ImageBackgrndRemover extends AppCompatActivity {
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
//instantia
if(!alreadyRun) {
BackgroundRemover remover = null;
if(remover == null)
{
remover = new BackgroundRemover();
remover.executeOnExecutor(AsyncTask.DUAL_THREAD_EXECUTOR);
}
}
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
private LoadingAnimation add;
ImageView iv;
Scalar color;
Mat dst;
private boolean alreadyRun;
public static final String TAG = "Grabcut demo";
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_image_backgrnd_remover);
iv = (ImageView) this.findViewById(R.id.imagePreview);
}
private int calculatePercentage(int percentage, int target)
{
int k = (int)(target*(percentage/100.0f));
return k;
}
private Bitmap backGrndErase()
{
color = new Scalar(255, 0, 0, 255);
dst = new Mat();
Bitmap bitmap = BitmapFactory.decodeResource(getResources(), R.drawable.myshirt);
Log.d(TAG, "bitmap: " + bitmap.getWidth() + "x" + bitmap.getHeight());
bitmap = ResizeImage.getResizedBitmap(bitmap, calculatePercentage(40, bitmap.getWidth()), calculatePercentage(40, bitmap.getHeight()));
// Bitmap bitmap2 = ImageCornerMoulder.getRoundedCornerBitmap(bmp, calculatePercentage(5, bmp.getHeight()));
bitmap = bitmap.copy(Bitmap.Config.ARGB_8888, true);
Log.d(TAG, "bitmap 8888: " + bitmap.getWidth() + "x" + bitmap.getHeight());
//GrabCut part
Mat img = new Mat();
Utils.bitmapToMat(bitmap, img);
Log.d(TAG, "img: " + img);
int r = img.rows();
int c = img.cols();
Point p1 = new Point(c/10, r/10);
Point p2 = new Point(c-c/10, r-r/10);
int border = 20;
int border2 = border + border;
Rect rect2 = new Rect(border,border,img.cols()-border2,img.rows()-border2);
Rect rect = new Rect(p1,p2);
Log.d(TAG, "rect: " + rect);
Mat mask = new Mat();
debugger(""+mask.type());
mask.setTo(new Scalar(125));
Mat fgdModel = new Mat();
fgdModel.setTo(new Scalar(255, 255, 255));
Mat bgdModel = new Mat();
bgdModel.setTo(new Scalar(255, 255, 255));
Mat imgC3 = new Mat();
Imgproc.cvtColor(img, imgC3, Imgproc.COLOR_RGBA2RGB);
Log.d(TAG, "imgC3: " + imgC3);
Log.d(TAG, "Grabcut begins");
Imgproc.grabCut(imgC3, mask, rect2, bgdModel, fgdModel, 2, Imgproc.GC_INIT_WITH_RECT);
Mat source = new Mat(1, 1, CvType.CV_8U, new Scalar(3.0));
Core.compare(mask, source, mask, Core.CMP_EQ);
Mat foreground = new Mat(img.size(), CvType.CV_8UC3, new Scalar(255, 255, 255));
img.copyTo(foreground, mask);
Imgproc.rectangle(img, p1, p2, color);
Mat background = new Mat();
try {
background = Utils.loadResource(getApplicationContext(),
R.drawable.blackcolor );
} catch (IOException e) {
e.printStackTrace();
}
Mat tmp = new Mat();
Imgproc.resize(background, tmp, img.size());
background = tmp;
Mat tempMask = new Mat(foreground.size(), CvType.CV_8UC1, new Scalar(255, 255, 255));
Imgproc.cvtColor(foreground, tempMask, 6/* COLOR_BGR2GRAY */);
//Imgproc.threshold(tempMask, tempMask, 254, 255, 1 /* THRESH_BINARY_INV */);
Mat vals = new Mat(1, 1, CvType.CV_8UC3, new Scalar(0.0));
dst = new Mat();
background.setTo(vals, tempMask);
Imgproc.resize(foreground, tmp, mask.size());
foreground = tmp;
Core.add(background, foreground, dst, tempMask);
Log.d(TAG, "Convert to Bitmap");
//removing black background started
/***
Mat tmp2 = new Mat();
Mat alpha = new Mat();
Imgproc.cvtColor(dst, tmp2, Imgproc.COLOR_BGR2GRAY);
Imgproc.threshold(tmp2, alpha, 100, 255, Imgproc.THRESH_BINARY);
List<Mat> rgb = new ArrayList<Mat>(3);
Core.split(dst, rgb);
List<Mat> rgba = new ArrayList<Mat>(4);
rgba.add(rgb.get(0));
rgba.add(rgb.get(1));
rgba.add(rgb.get(2));
rgba.add(alpha);
Core.merge(rgba, dst);
Bitmap output = Bitmap.createBitmap(dst.width(), dst.height(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(dst, output);
***/
//removing black background ended
Utils.matToBitmap(dst, bitmap);
//release MAT part
img.release();
imgC3.release();
mask.release();
fgdModel.release();
bgdModel.release();
alreadyRun = true;
return bitmap;
}
private void showLoadingIndicator()
{
android.support.v4.app.FragmentManager fm = getSupportFragmentManager();
add = LoadingAnimation.newInstance("b");
add.show(fm, "");
add.setCancelable(false);
}
private void dismissLoadingIndicator()
{
try {
add.dismiss();
}catch (Exception e)
{
e.printStackTrace();
}
}
private class BackgroundRemover extends AsyncTask<Void, Void, Bitmap>
{
@Override
protected void onPreExecute()
{
showLoadingIndicator();
}
@Override
protected Bitmap doInBackground(Void... voids) {
Process.setThreadPriority(Process.THREAD_PRIORITY_BACKGROUND +
Process.THREAD_PRIORITY_MORE_FAVORABLE);
try{
return backGrndErase();
}catch (Exception e)
{
System.err.println("Failed to remove background");
}
return null;
}
@Override
protected void onPostExecute(Bitmap bitmap) {
// iv.setBackgroundResource(R.drawable.blackcolor);
iv.setImageBitmap(bitmap);
dismissLoadingIndicator();
}
}
@Override
public void onResume()
{
super.onResume();
if (!OpenCVLoader.initDebug()) {
Log.d(TAG, "Internal OpenCV library not found. Using OpenCV Manager for initialization");
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_3_0_0, this, mLoaderCallback);
} else {
Log.d(TAG, "OpenCV library found inside package. Using it!");
mLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
}
}
private static Bitmap makeBlackTransparent(Bitmap image) {
// convert image to matrix
Mat src = new Mat(image.getWidth(), image.getHeight(), CvType.CV_8UC4);
Utils.bitmapToMat(image, src);
// init new matrices
Mat dst = new Mat(image.getWidth(), image.getHeight(), CvType.CV_8UC4);
Mat tmp = new Mat(image.getWidth(), image.getHeight(), CvType.CV_8UC4);
Mat alpha = new Mat(image.getWidth(), image.getHeight(), CvType.CV_8UC4);
// convert image to grayscale
Imgproc.cvtColor(src, tmp, Imgproc.COLOR_BGR2GRAY);
// threshold the image to create alpha channel with complete transparency in black background region and zero transparency in foreground object region.
Imgproc.threshold(tmp, alpha, 100, 255, Imgproc.THRESH_BINARY);
// split the original image into three single channel.
List<Mat> rgb = new ArrayList<Mat>(3);
Core.split(src, rgb);
// Create the final result by merging three single channel and alpha(BGRA order)
List<Mat> rgba = new ArrayList<Mat>(4);
rgba.add(rgb.get(0));
rgba.add(rgb.get(1));
rgba.add(rgb.get(2));
rgba.add(alpha);
Core.merge(rgba, dst);
// convert matrix to output bitmap
Bitmap output = Bitmap.createBitmap(image.getWidth(), image.getHeight(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(dst, output);
return output;
}
public void debugger(String s){
Log.v("", "########### " + s);
}
}
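One lever that usually helps more than adding threads (a hedged sketch, not the author's code): grabCut's cost grows with pixel count, so running it on a further-downscaled copy and resizing the label mask back up cuts the time roughly with the square of the scale factor. The names follow backGrndErase() above; the 0.5 scale is an assumption to tune for quality versus speed.
// Run grabCut at half resolution, then bring the mask back to full size.
double scale = 0.5; // assumption: tune for quality vs. speed
Mat small = new Mat();
Imgproc.resize(imgC3, small, new Size(), scale, scale, Imgproc.INTER_AREA);
Rect smallRect = new Rect((int) (rect2.x * scale), (int) (rect2.y * scale),
        (int) (rect2.width * scale), (int) (rect2.height * scale));
Mat smallMask = new Mat();
Imgproc.grabCut(small, smallMask, smallRect, bgdModel, fgdModel, 2,
        Imgproc.GC_INIT_WITH_RECT);
// Nearest-neighbor keeps the GC_* labels discrete when upscaling.
Imgproc.resize(smallMask, mask, imgC3.size(), 0, 0, Imgproc.INTER_NEAREST);
Splitting a single grabCut call across your own threads is not something the OpenCV Java API exposes directly, so reducing the pixel count it has to process is the more reliable lever.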
I am currently trying to create an Android app using OpenCV that allows the user to track points on moving objects with the smartphone camera. Analogous C++ code that does exactly what I am looking for can be found at the following link: OpticalFlow C++ Sample Code
I have been Googling and looking around on StackOverflow, but I still can't figure out why my code is not working. I am able to place points on the screen every time I press a certain spot, but the points seem motionless even as I move objects in front of the camera. The method used to calculate the optical flow is the following:
void org.opencv.video.Video.calcOpticalFlowPyrLK(Mat prevImg, Mat nextImg, MatOfPoint2f prevPts, MatOfPoint2f nextPts, MatOfByte status, MatOfFloat err)
I believe I am passing the exact parameters needed to calculate the optical flow of consecutive images, but for some reason it's not working. Below is my code:
package org.opencv.UActivity;
//INCLUDE FILES
...
public class U2Activity extends Activity implements OnTouchListener,CvCameraViewListener2{
private static final String TAG = "OCVSample::Activity";
private Mat nextGray,Rscale;
private Mat prevGray;
private MatOfPoint2f prev2D,next2D;
private MatOfByte status;
private MatOfFloat err;
private Scalar color;
private CameraBridgeViewBase mOpenCvCameraView;
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
switch (status) {
case LoaderCallbackInterface.SUCCESS:
{
Log.i(TAG, "OpenCV loaded successfully");
mOpenCvCameraView.enableView();
mOpenCvCameraView.setOnTouchListener(U2Activity.this);
} break;
default:
{
super.onManagerConnected(status);
} break;
}
}
};
public U2Activity() {
Log.i(TAG, "Instantiated new " + this.getClass());
}
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "called onCreate");
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
setContentView(R.layout.u2_surface_view);
mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.u2_activity_surface_view);
mOpenCvCameraView.setCvCameraViewListener(this);
color = new Scalar(0, 255, 0);
}
@Override
public void onPause()
{
super.onPause();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
@Override
public void onResume()
{
super.onResume();
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this, mLoaderCallback);
}
public void onDestroy() {
super.onDestroy();
if (mOpenCvCameraView != null)
mOpenCvCameraView.disableView();
}
public void onCameraViewStarted(int width, int height) {
nextGray = new Mat(height, width, CvType.CV_8UC1); //unsigned char
Rscale = new Mat(height, width, CvType.CV_8UC1);
prevGray = new Mat(height, width, CvType.CV_8UC1);
prev2D = new MatOfPoint2f(new Point());
next2D = new MatOfPoint2f(new Point());
status = new MatOfByte();
err = new MatOfFloat();
}
public void onCameraViewStopped() {
nextGray.release();
Rscale.release();
}
public boolean onTouch(View v, MotionEvent event) {
int cols = nextGray.cols();
int rows = nextGray.rows();
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2;
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2;
int x = (int)event.getX() - xOffset;
int y = (int)event.getY() - yOffset;
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
prev2D.push_back(new MatOfPoint2f(new Point((double)x,(double)y)));
next2D.push_back(new MatOfPoint2f(new Point()));
return false; // don't need subsequent touch events
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
nextGray = inputFrame.gray(); //get current image
Rscale = nextGray; //make a copy of current image
if(prevGray.empty()) prevGray = nextGray; //on start there is no prevGray. Copy current.
Video.calcOpticalFlowPyrLK(prevGray,nextGray,prev2D,next2D,status,err); //Calc the Optical Flow
prevGray = nextGray; //Overwrite old Image (prevGray)
prev2D = next2D; //Overwrite old point coordinates
for(int i=0;i<next2D.toArray().length;i++){ //Draw the points in the image
Core.circle(Rscale, next2D.toArray()[i], 3, color);
}
return Rscale;
}
}
SOLVED:
I changed:
prevGray = nextGray;
prev2D = next2D;
to:
nextGray.copyTo(prevGray);
next2D.copyTo(prev2D);
In Java, assigning one Mat to another only copies the reference, so prevGray and nextGray ended up sharing the same native data and the optical flow was always computed between two identical images; copyTo makes a real copy. I hope this helps anyone encountering similar problems.
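A tiny illustration of the reference-versus-copy behavior (illustrative only, not from the original post):
Mat a = Mat.zeros(1, 1, CvType.CV_8UC1);
Mat b = a;                  // b refers to the same native Mat as a
a.setTo(new Scalar(255));
// b.get(0, 0)[0] is now 255 too: no pixel data was copied
Mat c = new Mat();
a.copyTo(c);                // c gets its own pixel buffer
a.setTo(new Scalar(0));
// c.get(0, 0)[0] is still 255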