Use RenderScript to emboss a shape in Android

This question is based on link.
I tried out miloslaw-smyk's answer. However, I could not get it to work. I am not sure what createPath(1) means, or how to create a path with a certain stroke width; we can do that using the fillPaint. Anyway, below I have shown the complete code. The problem is that I don't see the emboss effect shown in the original link (see output image below). Please let me know what I have done wrong. (I am using targetSdkVersion 23 and the device is on Android 4.1.2.)
private Bitmap puzzelImage; // screen size image
private Bitmap mBitmapIn;
private Bitmap mBitmapPuzzle;
private RenderScript mRS;
private Allocation mInAllocation;
private Allocation mPuzzleAllocation;
private Allocation mCutterAllocation;
private Allocation mOutAllocation;
private Allocation mOutAllocation2;
private Allocation mAllocationHist;
private ScriptIntrinsicBlur mScriptBlur;
private ScriptIntrinsicBlend mScriptBlend;
private ScriptIntrinsicHistogram mScriptHistogram;
private ScriptIntrinsicLUT mScriptLUT;
public Activity ctx;
private int bw = 780;
private int bh = 780;
Path path2;
private void init() {
if (puzzel.mybitmap == null)
return;
bw = puzzelImage.getWidth();
bh = puzzelImage.getHeight();
mBitmapIn = puzzelImage ;
mBitmapPuzzle = Bitmap.createBitmap(bw, bh, Bitmap.Config.ARGB_8888); // this will hold the puzzle
Canvas c = new Canvas(mBitmapPuzzle);
createPath();
//path2 = path1;
fillPaint.setStrokeWidth(5);
c.drawPath(path2, fillPaint); // draw it on canvas
createScript(); // get renderscripts and Allocations ready
// Apply gaussian blur of radius 25 to our drawing
mScriptBlur.setRadius(25);
mScriptBlur.setInput(mPuzzleAllocation);
mScriptBlur.forEach(mOutAllocation);
// Now apply the blur of radius 1
mScriptBlur.setRadius(1);
mScriptBlur.setInput(mPuzzleAllocation);
mScriptBlur.forEach(mOutAllocation2);
// Subtract one blur result from another
mScriptBlend.forEachSubtract(mOutAllocation, mOutAllocation2);
// We now want to normalize the result (e.g. make it use full 0-255 range).
// To do that, we will first compute the histogram of our image
mScriptHistogram.setOutput(mAllocationHist);
mScriptHistogram.forEach(mOutAllocation2);
// copy the histogram to Java array...
int []hist = new int[256 * 4];
mAllocationHist.copyTo(hist);
// ...and walk it from the end looking for the first non empty bin
int i;
for(i = 255; i > 1; i--)
if((hist[i * 4] | hist[i * 4 + 1] | hist[i * 4 + 2]) != 0)
break;
// Now setup the LUTs that will map the image to the new, wider range.
// We also use the opportunity to inverse the image ("255 -").
for(int x = 0; x <= i; x++)
{
int val = 255 - x * 255 / i;
mScriptLUT.setAlpha(x, 255); // note we always make it fully opaque
mScriptLUT.setRed(x, val);
mScriptLUT.setGreen(x, val);
mScriptLUT.setBlue(x, val);
}
// the mapping itself.
mScriptLUT.forEach(mOutAllocation2, mOutAllocation);
Bitmap mBitmapCutter = Bitmap.createBitmap(bw, bh, Bitmap.Config.ARGB_8888);
c = new Canvas(mBitmapCutter);
createPath();
fillPaint.setStrokeWidth(1);
c.drawPath(path2, fillPaint);
mCutterAllocation = Allocation.createFromBitmap(mRS, mBitmapCutter);
// cookie cutter now
mScriptBlend.forEachDstIn(mCutterAllocation, mOutAllocation);
mScriptBlend.forEachMultiply(mOutAllocation, mInAllocation);
mInAllocation.copyTo(mBitmapPuzzle);
}
private void createScript() {
try {
mRS = RenderScript.create(ctx);
mPuzzleAllocation = Allocation.createFromBitmap(mRS, mBitmapPuzzle);
// three following allocations could actually use createSized(),
// but the code would be longer.
mInAllocation = Allocation.createFromBitmap(mRS, mBitmapIn);
mOutAllocation = Allocation.createFromBitmap(mRS, mBitmapPuzzle);
mOutAllocation2 = Allocation.createFromBitmap(mRS, mBitmapPuzzle);
mAllocationHist = Allocation.createSized(mRS, Element.I32_3(mRS), 256);
mScriptBlur = ScriptIntrinsicBlur.create(mRS, Element.U8_4(mRS));
mScriptBlend = ScriptIntrinsicBlend.create(mRS, Element.U8_4(mRS));
mScriptHistogram = ScriptIntrinsicHistogram.create(mRS, Element.U8_4(mRS));
mScriptLUT = ScriptIntrinsicLUT.create(mRS, Element.U8_4(mRS));
}catch (Exception e)
{
}
}
public void createPath()
{
path2 = new Path();
//path 1 small one
Point[] araay = new Point[]{new Point(144,320),new Point(109,200), new Point(171,308),new Point(178,240),new Point(171,172),new Point(109,282),new Point(144,160)};
AddBeziers(path2, araay, 320, 144);
AddLine(path2, 216, 144 );
AddLine(path2, 216, 216 );
AddLine(path2, 144, 320);
fillPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
fillPaint.setColor(Color.WHITE);
fillPaint.setFlags(Paint.ANTI_ALIAS_FLAG | Paint.DITHER_FLAG);
fillPaint.setAntiAlias(true);
fillPaint.setDither(true);
fillPaint.setStrokeJoin(Paint.Join.ROUND);
fillPaint.setStrokeCap(Paint.Cap.ROUND);
fillPaint.setStyle(Paint.Style.FILL_AND_STROKE);
}
protected Path AddLine(Path path, int endX, int endY) {
//path.moveTo(startX, startY);
path.lineTo(endX, endY);
return path;
}
protected Path AddBeziers(Path path, Point[] points, int lastX, int lastY) {
if (points[0].X != lastX && points[0].Y != lastY)
path.moveTo(points[0].X, points[0].Y);
int index = 1;
path.cubicTo(points[index].X, points[index].Y, points[index + 1].X,
points[index + 1].Y, points[index + 2].X, points[index + 2].Y);
index = index + 3;
path.cubicTo(points[index].X, points[index].Y, points[index + 1].X,
points[index + 1].Y, points[index + 2].X, points[index + 2].Y);
return path;
/*pointsCounter = points.length;
//point = new PointNew(new PointF(points[1].X, points[1].Y));
prev = new PointNew(new PointF(points[0].X, points[0].Y));
//next = new PointNew(new PointF(points[2].X, points[2].Y));
for (int i =1; i < points.length; i++)
appendToPath(path, new PointF(points[i].X, points[i].Y), null);
return path;*/
}
@Override
protected void onDraw(Canvas canvas) {
canvas.drawBitmap(mBitmapPuzzle, 0, 0, fillPaint);
super.onDraw(canvas);
}

Can you try with my version of createPath()? I modified it slightly to draw a larger puzzle and to accept the stroke width as an argument (a sketch of how the init() calls would change follows the code).
public void createPath(int strokeWidth)
{
//path 2 Big one
Point[]araay = new Point[]{new Point(144,320),new Point(109,200), new Point(171,308),new Point(178,240),new Point(171,172),new Point(109,282),new Point(144,160)};
Point[]braay = new Point[araay.length];
int idx = 6;
for(Point p : araay)
braay[idx--] = new Point((200 + p.x), p.y);
path2.moveTo(144,320);
AddBeziers(path2, araay, 320, 144);
AddLine(path2, 216, 144);
AddBeziers(path2, braay, 320, 144);
AddLine(path2, 144, 320);
path2.close();
Matrix m = new Matrix();
m.setScale(2, 2);
path2.transform(m);
MaskFilter mEmboss = new EmbossMaskFilter(new float[] { 1, 1, 1 }, 0.4f, 6, 3.5f);
fillPaint = new Paint(Paint.ANTI_ALIAS_FLAG);
fillPaint.setColor(Color.WHITE);
fillPaint.setFlags(Paint.ANTI_ALIAS_FLAG | Paint.DITHER_FLAG);
fillPaint.setAntiAlias(true);
fillPaint.setDither(true);
fillPaint.setStrokeJoin(Paint.Join.ROUND);
fillPaint.setStrokeCap(Paint.Cap.ROUND);
fillPaint.setStrokeWidth(strokeWidth);
fillPaint.setStyle(Paint.Style.FILL_AND_STROKE);
fillPaint.setStrokeMiter(0);
}
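With this version, the two parameterless createPath() calls in init() would presumably become createPath(5) for the blurred/emboss pass and createPath(1) for the cookie-cutter pass, mirroring the setStrokeWidth(5) and setStrokeWidth(1) calls in the question. A rough, untested sketch of how that part of init() would look:
// Sketch only: the new createPath(int) draws into the path2 field, so it is
// (re)created before each call here; that is an assumption, adjust as needed.
Canvas c = new Canvas(mBitmapPuzzle);
path2 = new Path();
createPath(5);                  // wide stroke for the blur/emboss pass
c.drawPath(path2, fillPaint);
// ... blur, subtract, histogram and LUT steps exactly as in init() above ...
Bitmap mBitmapCutter = Bitmap.createBitmap(bw, bh, Bitmap.Config.ARGB_8888);
c = new Canvas(mBitmapCutter);
path2 = new Path();             // start the cutter path fresh
createPath(1);                  // thin stroke for the cookie-cutter pass
c.drawPath(path2, fillPaint);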

Related

Perspective Transform OpenCV

I'm new to OpenCV on Android and am trying to do a perspective transform, but I don't know how to use the getPerspectiveTransform() and warpPerspective() functions. I can detect a rectangle in an image, but I don't know how to warp it.
Here is the rectangle detection function:
Mat tempMat = new Mat();
Mat src = new Mat();
Utils.bitmapToMat(image, tempMat);
Imgproc.cvtColor(tempMat, src, Imgproc.COLOR_BGR2RGB);
Mat blurred = src.clone();
Imgproc.medianBlur(src, blurred, 9);
Mat gray0 = new Mat(blurred.size(), CvType.CV_8U), gray = new Mat();
List<MatOfPoint> contours = new ArrayList<MatOfPoint>();
List<Mat> blurredChannel = new ArrayList<Mat>();
blurredChannel.add(blurred);
List<Mat> gray0Channel = new ArrayList<Mat>();
gray0Channel.add(gray0);
MatOfPoint2f approxCurve = new MatOfPoint2f();
double maxArea = 0;
int maxId = -1;
for (int c = 0; c < 3; c++) {
int ch[] = { c, 0 };
Core.mixChannels(blurredChannel, gray0Channel, new MatOfInt(ch));
int thresholdLevel = 1;
for (int t = 0; t < thresholdLevel; t++) {
if (t == 0) {
Imgproc.Canny(gray0, gray, 50, 50, 3, true); // true ?
Imgproc.dilate(gray, gray, new Mat(), new Point(-1, -1), 1); // 1
// ?
} else {
Imgproc.adaptiveThreshold(gray0, gray, thresholdLevel,
Imgproc.ADAPTIVE_THRESH_GAUSSIAN_C,
Imgproc.THRESH_BINARY,
(src.width() + src.height()) / 200, t);
}
Imgproc.findContours(gray, contours, new Mat(),
Imgproc.RETR_LIST, Imgproc.CHAIN_APPROX_SIMPLE);
for (MatOfPoint contour : contours) {
MatOfPoint2f temp = new MatOfPoint2f(contour.toArray());
double area = Imgproc.contourArea(contour);
approxCurve = new MatOfPoint2f();
Imgproc.approxPolyDP(temp, approxCurve,
Imgproc.arcLength(temp, true) * 0.02, true);
if (approxCurve.total() == 4 && area >= maxArea) {
double maxCosine = 0;
List<Point> curves = approxCurve.toList();
for (int j = 2; j < 5; j++) {
double cosine = Math.abs(angle(curves.get(j % 4),
curves.get(j - 2), curves.get(j - 1)));
maxCosine = Math.max(maxCosine, cosine);
}
if (maxCosine < 0.45) {
maxArea = area;
maxId = contours.indexOf(contour);
}
}
}
}
}
I draw the rectangle with this statement.
if (maxId >= 0) {
Rect rect = Imgproc.boundingRect(contours.get(maxId));
Imgproc.rectangle(src, rect.tl(), rect.br(), new Scalar(255, 0, 0,
.8), 4);
}
After that I convert the Mat to a Bitmap and show it in an ImageView.
Here is the screenshot.
So my problem is warping: how can I warp the rectangle and rotate it?
And if it is possible, how can I improve the rectangle detection? Any hints?
(OpenCV Android SDK version: 3.41, Android Studio version: 3.01)
If you are looking to warp the detected contour into a rectangle:
Get the contours of the rectangle
Find the convex hull of the contour
Using approxPolyDP, reduce the convex hull points to 4 points
Fit lines to consecutive points (e.g. if pts is the array, the lines are fit as l1 = lineBetween(pts[0], pts[1]), l2 = lineBetween(pts[1], pts[2]), l3 = lineBetween(pts[2], pts[3]), l4 = lineBetween(pts[3], pts[0]))
Find the intersections between these lines; you'll end up with four points
Order the points in clockwise order (inputCorners = TopLeft, TopRight, BottomRight, BottomLeft)
Create an output image with the needed resolution and list its corner points in the same clockwise order ((0, 0), (0, cols), (rows, cols), (rows, 0))
Find the homography using
Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
Using the output homography matrix, warp the input image with
Imgproc.warpPerspective(image, outputMat, homography, new Size(image.cols(), image.rows()));
You can refer to the following link; a minimal sketch of the last two steps is shown below.
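Not exact code, just a minimal Java sketch of those last two steps, assuming the four corner points have already been ordered clockwise (tl, tr, br, bl and the output size are placeholder names):
import org.opencv.calib3d.Calib3d;
import org.opencv.core.Mat;
import org.opencv.core.MatOfPoint2f;
import org.opencv.core.Point;
import org.opencv.core.Size;
import org.opencv.imgproc.Imgproc;
// Warps the quad (tl, tr, br, bl) of 'image' onto an outWidth x outHeight rectangle.
private Mat warpQuadToRect(Mat image, Point tl, Point tr, Point br, Point bl, int outWidth, int outHeight) {
    MatOfPoint2f inputCorners = new MatOfPoint2f(tl, tr, br, bl);
    MatOfPoint2f imageCorners = new MatOfPoint2f(
            new Point(0, 0),
            new Point(outWidth, 0),
            new Point(outWidth, outHeight),
            new Point(0, outHeight));
    // homography mapping the detected quad onto the output rectangle
    Mat homography = Calib3d.findHomography(inputCorners, imageCorners, Calib3d.RANSAC, 10);
    Mat outputMat = new Mat();
    Imgproc.warpPerspective(image, outputMat, homography, new Size(outWidth, outHeight));
    return outputMat;
}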
This is my Kotlin extension version; you can use it in your projects.
fun Bitmap.perspectiveTransform(srcPoints: List<org.opencv.core.Point>) :
Bitmap{
val dstWidth = max(
srcPoints[0].distanceFrom(srcPoints[1]),
srcPoints[2].distanceFrom(srcPoints[3])
)
val dstHeight = max(
srcPoints[0].distanceFrom(srcPoints[2]),
srcPoints[1].distanceFrom(srcPoints[3])
)
val dstPoints: List<org.opencv.core.Point> = listOf(
org.opencv.core.Point(0.0, 0.0),
org.opencv.core.Point(dstWidth, 0.0),
org.opencv.core.Point(0.0, dstHeight),
org.opencv.core.Point(dstWidth, dstHeight)
)
return try {
val srcMat = Converters.vector_Point2d_to_Mat(srcPoints)
val dstMat = Converters.vector_Point2d_to_Mat(dstPoints)
val perspectiveTransformation =
Imgproc.getPerspectiveTransform(srcMat, dstMat)
val inputMat = Mat(this.height, this.width, CvType.CV_8UC1)
Utils.bitmapToMat(this, inputMat)
val outPutMat = Mat(dstHeight.toInt(), dstWidth.toInt(), CvType.CV_8UC1)
Imgproc.warpPerspective(
inputMat,
outPutMat,
perspectiveTransformation,
Size(dstWidth, dstHeight)
)
val outPut = Bitmap.createBitmap(
dstWidth.toInt(),
dstHeight.toInt(), Bitmap.Config.RGB_565
)
//Imgproc.cvtColor(outPutMat , outPutMat , Imgproc.COLOR_GRAY2BGR)
Utils.matToBitmap(outPutMat , outPut)
outPut
}
catch ( e : Exception){
e.printStackTrace()
this
}
}
For distanceFrom I wrote another extension function:
fun org.opencv.core.Point.distanceFrom(srcPoint: org.opencv.core.Point):
Double {
val w1 = this.x - srcPoint.x
val h1 = this.y - srcPoint.y
val distance = w1.pow(2) + h1.pow(2)
return sqrt(distance)
}
Also, in this answer the correct srcPoints indices are:
0 : topleft
1 : topRight
2 : bottomLeft
3 : bottomRight
Good luck

Android: Shape detection with JavaCV

I am new to JavaCV. I am trying to detect the largest rectangle in an image and outline it in color over the original image. I am posting the code below, which I have tried but is not working. I am getting edgeDetectedImage properly and the 4 corner points properly; just cvDrawLine is not working. Please help if I am missing anything:
On click of a button I am processing the image and showing it again in an ImageView.
In the onClickListener of the button:
if ((new File(path + "trial.jpg")).exists()) {
opencv_core.IplImage originalImage = opencv_imgcodecs.cvLoadImage(path + "trial.jpg", opencv_imgcodecs.CV_IMWRITE_JPEG_QUALITY);
opencv_core.IplImage iplImage = opencv_imgcodecs.cvLoadImage(path + "trial.jpg", opencv_imgcodecs.CV_LOAD_IMAGE_GRAYSCALE);
opencv_core.IplImage edgeDetectedImage = applyCannyRectangleEdgeDetection(iplImage, 80);
opencv_core.CvSeq largestContour = findLargestContour(edgeDetectedImage);
opencv_core.CvPoint[] cvPoints = new opencv_core.CvPoint[4];
for(int i=0; i<largestContour.total();i++)
{
opencv_core.CvPoint cvPoint = new opencv_core.CvPoint(cvGetSeqElem(largestContour, i));
cvPoints[i] = cvPoint;
}
cvDrawLine(originalImage, cvPoints[0], cvPoints[1], opencv_core.CvScalar.YELLOW, 10, 10, 10);
cvDrawLine(originalImage, cvPoints[1], cvPoints[2], opencv_core.CvScalar.YELLOW, 10, 10, 10);
cvDrawLine(originalImage, cvPoints[2], cvPoints[3], opencv_core.CvScalar.YELLOW, 10,10, 10);
cvDrawLine(originalImage, cvPoints[3], cvPoints[0], opencv_core.CvScalar.YELLOW, 10, 10,10);
opencv_imgcodecs.cvSaveImage(path + "img1.jpg", originalImage);
if ((new File(path + "img1.jpg").exists())) {
imageView.setImageDrawable(Drawable.createFromPath(path + "img1.jpg"));
}
}
Method applyCannyRectangleEdgeDetection(IplImage, int):
private opencv_core.IplImage applyCannyRectangleEdgeDetection(opencv_core.IplImage iplImage, int percent) {
opencv_core.IplImage destImage = downScaleImage(iplImage, percent);
OpenCVFrameConverter.ToMat converterToMat = new OpenCVFrameConverter.ToMat();
Frame grayImageFrame = converterToMat.convert(destImage);
opencv_core.Mat grayImageMat = converterToMat.convertToMat(grayImageFrame);
GaussianBlur(grayImageMat, grayImageMat, new opencv_core.Size(5, 5), 0.0, 0.0, BORDER_DEFAULT);
destImage = converterToMat.convertToIplImage(grayImageFrame);
cvErode(destImage, destImage);
cvDilate(destImage, destImage);
cvCanny(destImage, destImage, 20, 55);
return destImage;
}
Method downScaleImage(IplImage, int)
private opencv_core.IplImage downScaleImage(opencv_core.IplImage srcImage, int percent) {
opencv_core.IplImage destImage = cvCreateImage(cvSize((srcImage.width() * percent) / 100, (srcImage.height() * percent) / 100), srcImage.depth(), srcImage.nChannels());
cvResize(srcImage, destImage);
return destImage;
}
Method findLargestContour(IplImage)
private opencv_core.CvSeq findLargestContour(opencv_core.IplImage edgeDetectedImage) {
opencv_core.IplImage foundContoursOfImage = cvCloneImage(edgeDetectedImage);
opencv_core.CvMemStorage memory = new opencv_core.CvMemStorage().create();
opencv_core.CvSeq contours = new opencv_core.CvSeq();
cvFindContours(foundContoursOfImage, memory, contours, Loader.sizeof(opencv_core.CvContour.class), CV_RETR_LIST, CV_CHAIN_APPROX_SIMPLE, new opencv_core.CvPoint(0, 0));
int maxWidth = 0;
int maxHeight = 0;
opencv_core.CvRect contr = null;
opencv_core.CvSeq seqFound = null;
opencv_core.CvSeq nextSeq;
for (nextSeq = contours; nextSeq != null; nextSeq = nextSeq.h_next()) {
contr = cvBoundingRect(nextSeq, 0);
if ((contr.width() >= maxWidth) && (contr.height() >= maxHeight)) {
maxHeight = contr.height();
maxWidth = contr.width();
seqFound = nextSeq;
}
}
opencv_core.CvSeq result = cvApproxPoly(seqFound, Loader.sizeof(opencv_core.CvContour.class), memory, CV_POLY_APPROX_DP, cvContourPerimeter(seqFound) * 0.1, 0);
return result;
}
Sorry, this should be in the comments but I don't have enough reputation. What I can see from your code is that the Canny is applied on a downscaled image and so is the contour. You are drawing the lines on the original image (which isn't downscaled by percent), so naturally it wouldn't look correct (if something is being drawn but doesn't look right). Otherwise, you should mention the color space of the image; it doesn't matter for drawing but does for Canny.
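For example, something along these lines in the onClick block would map the corner points back before drawing (a sketch only; percent is assumed to be the same value passed to applyCannyRectangleEdgeDetection for downscaling):
// Scale the corners found on the downscaled image back to the coordinates
// of originalImage before drawing the yellow lines on it.
for (int i = 0; i < largestContour.total() && i < 4; i++) {
    opencv_core.CvPoint p = new opencv_core.CvPoint(cvGetSeqElem(largestContour, i));
    cvPoints[i] = new opencv_core.CvPoint(p.x() * 100 / percent,   // undo the percent/100 downscale
                                          p.y() * 100 / percent);
}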

Running OpenCV eye detection from within Android service

I want to run eye detection with OpenCV4Android from an Android background service. I have a piece of code that runs well, but as an Activity, not a service. I understand that the Android camera must have a preview to open, so I have created a preview (a small one to make it look hidden, since I want the processing to be in the background) and started the camera for recording. The camera starts successfully, but OpenCV doesn't detect eyes and faces; it only loads the XML classifiers. I expected the OpenCV callbacks like onCameraViewStarted and onCameraFrame to be called when I open the camera for recording, but they weren't.
Here is the code:
public class BackgroundService extends Service implements SurfaceHolder.Callback, CameraBridgeViewBase.CvCameraViewListener2 {
private static final String TAG = "OCVSample::Activity";
private static final Scalar FACE_RECT_COLOR = new Scalar(0, 255, 0, 255);
public static final int JAVA_DETECTOR = 0;
private static final int TM_SQDIFF = 0;
private static final int TM_SQDIFF_NORMED = 1;
private static final int TM_CCOEFF = 2;
private static final int TM_CCOEFF_NORMED = 3;
private static final int TM_CCORR = 4;
private static final int TM_CCORR_NORMED = 5;
private int learn_frames = 0;
private Mat templateR;//right eye template
private Mat templateL; // left eye template
int method = 0;
private MenuItem mItemFace50;
private MenuItem mItemFace40;
private MenuItem mItemFace30;
private MenuItem mItemFace20;
private MenuItem mItemType;
private Mat mRgba;
private Mat mGray;
// matrix for zooming
private Mat mZoomWindow;
private Mat mZoomWindow2;
private File mCascadeFile;
private CascadeClassifier mJavaDetector;
private CascadeClassifier mJavaDetectorEye;
private int mDetectorType = JAVA_DETECTOR;
private String[] mDetectorName;
private float mRelativeFaceSize = 0.2f;
private int mAbsoluteFaceSize = 0;
private CameraBridgeViewBase mOpenCvCameraView;
private SeekBar mMethodSeekbar;
private TextView mValue;
double xCenter = -1;
double yCenter = -1;
MediaRecorder mediaRecorder;
// Binder given to clients
private final IBinder mBinder = new LocalBinder();
public class LocalBinder extends Binder {
BackgroundService getService() {
// Return this instance of this service so clients can call public methods
return BackgroundService.this;
}
}//end inner class that returns an instance of the service.
@Override
public IBinder onBind(Intent intent) {
return mBinder;
}//end onBind.
private WindowManager windowManager;
private SurfaceView surfaceView;
private Camera camera = null;
@Override
public void onCreate() {
// Start foreground service to avoid unexpected kill
Notification notification = new Notification.Builder(this)
.setContentTitle("Background Video Recorder")
.setContentText("")
.setSmallIcon(R.drawable.vecsat_logo)
.build();
startForeground(1234, notification);
// Create new SurfaceView, set its size to 1x1, move it to the top left corner and set this service as a callback
windowManager = (WindowManager) this.getSystemService(Context.WINDOW_SERVICE);
surfaceView = new SurfaceView(this);
WindowManager.LayoutParams layoutParams = new WindowManager.LayoutParams(
100, 100,
WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
WindowManager.LayoutParams.FLAG_WATCH_OUTSIDE_TOUCH,
PixelFormat.TRANSLUCENT
);
Log.i(TAG, "100 x 100 executed");
layoutParams.gravity = Gravity.LEFT | Gravity.TOP;
windowManager.addView(surfaceView, layoutParams);
surfaceView.getHolder().addCallback(this);
//constructor:
mDetectorName = new String[2];// contains 3 positions..
mDetectorName[JAVA_DETECTOR] = "Java"; //let the detector be of type java detector, specify that in the JAVA_DETECTOR index.
Log.i(TAG, "Instantiated new " + ((Object) this).getClass().getSimpleName());
OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_11, this,
mLoaderCallback); //once the application is resumed reload the library.
}
// Method called right after Surface created (initializing and starting MediaRecorder)
@Override
public void surfaceCreated(SurfaceHolder surfaceHolder) {
Log.i(TAG, "surfaceCreated method");
camera = Camera.open(1);
camera.unlock();
mediaRecorder = new MediaRecorder();
mediaRecorder.setPreviewDisplay(surfaceHolder.getSurface());
mediaRecorder.setCamera(camera);
mediaRecorder.setAudioSource(MediaRecorder.AudioSource.CAMCORDER);
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.CAMERA);
mediaRecorder.setProfile(CamcorderProfile.get(CamcorderProfile.QUALITY_HIGH));
mediaRecorder.setOutputFile(
Environment.getExternalStorageDirectory()+"/"+
DateFormat.format("yyyy-MM-dd_kk-mm-ss", new Date().getTime())+
".mp4"
);
try { mediaRecorder.prepare(); } catch (Exception e) {}
mediaRecorder.start();
}
// Stop recording and remove SurfaceView
@Override
public void onDestroy() {
Log.i(TAG, "surfaceDestroyed method");
camera.lock();
camera.release();
windowManager.removeView(surfaceView);
}
@Override
public void surfaceChanged(SurfaceHolder surfaceHolder, int format, int width, int height) {}
@Override
public void surfaceDestroyed(SurfaceHolder surfaceHolder) {
}
private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
@Override
public void onManagerConnected(int status) {
//int status, status of initialization, sucess or not..
//now make a switch for the status cases: under success case do the work, load the classifiers..
switch (status) {
case LoaderCallbackInterface.SUCCESS: {
Log.i(TAG, "OpenCV loaded successfully"); // was loaded and initialized successfully..
try {
// load cascade file from application resources
InputStream is = getResources().openRawResource(
R.raw.lbpcascade_frontalface); // get the face classifier from the resource.
File cascadeDir = getDir("cascade", Context.MODE_PRIVATE);
mCascadeFile = new File(cascadeDir,
"lbpcascade_frontalface.xml"); // create a directory inside your app, and a file inside it to store the
FileOutputStream os = new FileOutputStream(mCascadeFile); // prepare an output stream that will write the classifier's code on the file in the app.
//read and write
byte[] buffer = new byte[4096];
int bytesRead;
while ((bytesRead = is.read(buffer)) != -1) {
os.write(buffer, 0, bytesRead);
}
is.close();
os.close();
// --------------------------------- load left eye
// classificator -----------------------------------
InputStream iser = getResources().openRawResource(
R.raw.haarcascade_lefteye_2splits);
File cascadeDirER = getDir("cascadeER",
Context.MODE_PRIVATE);
File cascadeFileER = new File(cascadeDirER,
"haarcascade_eye_right.xml");
FileOutputStream oser = new FileOutputStream(cascadeFileER);
byte[] bufferER = new byte[4096];
int bytesReadER;
while ((bytesReadER = iser.read(bufferER)) != -1) {
oser.write(bufferER, 0, bytesReadER);
}
iser.close();
oser.close();
//check if you can load the classifer.
mJavaDetector = new CascadeClassifier(
mCascadeFile.getAbsolutePath());
if (mJavaDetector.empty()) {
Toast.makeText(getApplicationContext(), "face classifier error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade face classifier");
mJavaDetector = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());
mJavaDetectorEye = new CascadeClassifier(
cascadeFileER.getAbsolutePath());
if (mJavaDetectorEye.empty()) {
Toast.makeText(getApplicationContext(), "eye classifer error", Toast.LENGTH_LONG).show();
Log.e(TAG, "Failed to load cascade eye classifier");
mJavaDetectorEye = null;
} else
Log.i(TAG, "Loaded cascade classifier from "
+ mCascadeFile.getAbsolutePath());
cascadeDir.delete();
} catch (IOException e) {
e.printStackTrace();
Log.e(TAG, "Failed to load cascade. Exception thrown: " + e);
}
//Whether classifiers are opened or not, open the front camera.
// mOpenCvCameraView.setCameraIndex(1);
//mOpenCvCameraView.enableFpsMeter(); // What is this? This method enables label with fps value on the screen
// mOpenCvCameraView.enableView(); // What? This means enable connecting to the camera.
}
break;
default: {
//When the loading of the libarary is failed
super.onManagerConnected(status);
}
break;
}
}
}; // end the class.
public void onCameraViewStarted(int width, int height) {
Log.i(TAG, "onCameraViewStarted method");
//onCameraViewStarted callback will be delivered only after enableView is called and surface is available
//This method is a member of CvCameraViewListener2, and we must implement it.
mGray = new Mat(); //initialize new gray scale matrix to contain the img pixels.
mRgba = new Mat(); //initialize new rgb matrix to contain the img pixels.
}
public void onCameraViewStopped() {
Log.i(TAG, "onCameraViewStopped method");
//Release the allocated memory
//release the matrix, this releases the allocated space in memory, since mat contains a header that contains img info and a pointer that points to the matrix in the memory.
mGray.release();
mRgba.release();
mZoomWindow.release();
mZoomWindow2.release();
}
public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame inputFrame) {
Log.i(TAG, "onCameraFrame method");
//This method is a member of CvCameraViewListener2, and we must implement it.
// In this method we get every frame from the camera and process it in order to track the objects.
//inputFrame is the received frame from the camera.
mRgba = inputFrame.rgba(); //convert the frame to rgba scale, then assign this value to the rgba Mat img matrix.
mGray = inputFrame.gray(); //convert the frame to gray scale, then assign this value to the gray Mat img matrix.
//Shall we consider Flipping the camera img horizontally.
if (mAbsoluteFaceSize == 0) {
int height = mGray.rows(); //get the height of the captured frame stored in mgray Mat array (rows), why gray to rgb???
if (Math.round(height * mRelativeFaceSize) > 0) { //multiply that height with 0.2... Is the result > 0?
//if yes this indicates that there is a frame that was captured (it's height is not zero), so set the face size to
// Math.round(height * mRelativeFaceSize)
mAbsoluteFaceSize = Math.round(height * mRelativeFaceSize);
}
}
if (mZoomWindow == null || mZoomWindow2 == null)
CreateAuxiliaryMats();
MatOfRect faces = new MatOfRect(); //a matrix that will contain rectangles around the face (including the faces inside the rectangles), it will be filled by detectMultiScale method.
//if mJavaDetector is not null, this contains the face classifier that we have loaded previously
if (mJavaDetector != null)
//if not null, use this classifier to detect faces.
mJavaDetector.detectMultiScale(mGray, faces, 1.1, 2,
2, // TODO: objdetect.CV_HAAR_SCALE_IMAGE
new Size(mAbsoluteFaceSize, mAbsoluteFaceSize),
new Size());
//in th function detectMultiScale above,
// faces is the array that will contain the rectangles around the detected face.
// the 3rd param: specifies how much the image size is reduced at each image scale.
//4th param: Parameter specifying how many neighbors each candidate rectangle should have to retain it.
//5: :)
//6: Minimum possible object size. Objects smaller than that are ignored (if you set a very small minimum value, your app will run heavily).
//7: Maximum possible object size. Objects larger than that are ignored. Both minimum and maximum should be set carefully to avoid slow running of the app.
Rect[] facesArray = faces.toArray(); //array of faces
for (int i = 0; i < facesArray.length; i++) {
/* Imgproc.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(),
FACE_RECT_COLOR, 3);*/
//Now draw rectangles around the obtained faces, and a circle at each rectangle center.
//mrgba in the line bellow means that the rectangle should be drawn on the colored img.
//facesArray[i].tl() returns a Point: Template class for 2D points specified by its coordinates x and y -> Template class
// facesArray[i].x and facesArray[i].y are the x and y coords of the top left top corner.
Core.rectangle(mRgba, facesArray[i].tl(), facesArray[i].br(), FACE_RECT_COLOR, 3);
//calculate the center in x and y coords.
xCenter = (facesArray[i].x + facesArray[i].width + facesArray[i].x) / 2;
yCenter = (facesArray[i].y + facesArray[i].y + facesArray[i].height) / 2;
Point center = new Point(xCenter, yCenter); //store the center.
//Imgproc.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3);
Core.circle(mRgba, center, 10, new Scalar(255, 0, 0, 255), 3); //draw a red circle at the center of the face rectangle.
/*Imgproc.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20),
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));*/
//write the coordinates of the rectangle center:
Core.putText(mRgba, "[" + center.x + "," + center.y + "]",
new Point(center.x + 20, center.y + 20) , // this is the bottom left corner of the text string
Core.FONT_HERSHEY_SIMPLEX, 0.7, new Scalar(255, 255, 255,
255));
Rect r = facesArray[i]; //get the currect face, we want to use it to detect the eyes inside it.
// compute the eye area
//Rect (x, y, w, h)
Rect eyearea = new Rect(r.x + r.width / 8,
(int) (r.y + (r.height / 4.5)), r.width - 2 * r.width / 8,
(int) (r.height / 3.0));
// split it
Rect eyearea_right = new Rect(r.x + r.width / 16,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
Rect eyearea_left = new Rect(r.x + r.width / 16
+ (r.width - 2 * r.width / 16) / 2,
(int) (r.y + (r.height / 4.5)),
(r.width - 2 * r.width / 16) / 2, (int) (r.height / 3.0));
// draw the area - mGray is working grayscale mat, if you want to
// see area in rgb preview, change mGray to mRgba
/*Imgproc.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Imgproc.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eyearea_left.tl(), eyearea_left.br(),
new Scalar(255, 0, 0, 255), 2);
Core.rectangle(mRgba, eyearea_right.tl(), eyearea_right.br(),
new Scalar(255, 0, 0, 255), 2);
if (learn_frames < 5) {
// no learned frames -> Learn templates from at least 5 frames..
templateR = get_template(mJavaDetectorEye, eyearea_right, 24);
templateL = get_template(mJavaDetectorEye, eyearea_left, 24);
learn_frames++;
} else {
// Learning finished, use the new templates for template
// matching
match_eye(eyearea_right, templateR, method);
match_eye(eyearea_left, templateL, method);
}
// cut eye areas and put them to zoom windows
Imgproc.resize(mRgba.submat(eyearea_left), mZoomWindow2,
mZoomWindow2.size());
Imgproc.resize(mRgba.submat(eyearea_right), mZoomWindow,
mZoomWindow.size());
}
return mRgba;
}
private void setMinFaceSize(float faceSize) {
mRelativeFaceSize = faceSize;
mAbsoluteFaceSize = 0;
}
private void CreateAuxiliaryMats() {
if (mGray.empty())
return;
int rows = mGray.rows();
int cols = mGray.cols();
if (mZoomWindow == null) {
mZoomWindow = mRgba.submat(rows / 2 + rows / 10, rows, cols / 2
+ cols / 10, cols);
mZoomWindow2 = mRgba.submat(0, rows / 2 - rows / 10, cols / 2
+ cols / 10, cols);
}
}
private void match_eye(Rect area, Mat mTemplate, int type) {
Point matchLoc;
Mat mROI = mGray.submat(area);
int result_cols = mROI.cols() - mTemplate.cols() + 1;
int result_rows = mROI.rows() - mTemplate.rows() + 1;
// Check for bad template size
if (mTemplate.cols() == 0 || mTemplate.rows() == 0) {
return ;
}
Mat mResult = new Mat(result_cols, result_rows, CvType.CV_8U);
switch (type) {
case TM_SQDIFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_SQDIFF);
break;
case TM_SQDIFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_SQDIFF_NORMED);
break;
case TM_CCOEFF:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCOEFF);
break;
case TM_CCOEFF_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCOEFF_NORMED);
break;
case TM_CCORR:
Imgproc.matchTemplate(mROI, mTemplate, mResult, Imgproc.TM_CCORR);
break;
case TM_CCORR_NORMED:
Imgproc.matchTemplate(mROI, mTemplate, mResult,
Imgproc.TM_CCORR_NORMED);
break;
}
Core.MinMaxLocResult mmres = Core.minMaxLoc(mResult);
// there is difference in matching methods - best match is max/min value
if (type == TM_SQDIFF || type == TM_SQDIFF_NORMED) {
matchLoc = mmres.minLoc;
} else {
matchLoc = mmres.maxLoc;
}
Point matchLoc_tx = new Point(matchLoc.x + area.x, matchLoc.y + area.y);
Point matchLoc_ty = new Point(matchLoc.x + mTemplate.cols() + area.x,
matchLoc.y + mTemplate.rows() + area.y);
/*Imgproc.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));*/
Core.rectangle(mRgba, matchLoc_tx, matchLoc_ty, new Scalar(255, 255, 0,
255));
Rect rec = new Rect(matchLoc_tx,matchLoc_ty);
}
private Mat get_template(CascadeClassifier clasificator, Rect area, int size) {
Mat template = new Mat(); //prepare a Mat which will serve as a template for eyes.
Mat mROI = mGray.submat(area); //detect only region of interest which is represented by the area. So, from the total Mat get only the submat that represent roi.
MatOfRect eyes = new MatOfRect(); //will be around eyes (including eyes), this will be filled by detectMultiScale
Point iris = new Point(); //to identify iris.
Rect eye_template = new Rect();
clasificator.detectMultiScale(mROI, eyes, 1.15, 2,
Objdetect.CASCADE_FIND_BIGGEST_OBJECT
| Objdetect.CASCADE_SCALE_IMAGE, new Size(30, 30),
new Size());
Rect[] eyesArray = eyes.toArray(); //get the detected eyes
for (int i = 0; i < eyesArray.length;) {
Rect e = eyesArray[i];
e.x = area.x + e.x; //the starting x coordinates of the rect (area) around the eye + the area
e.y = area.y + e.y;
Rect eye_only_rectangle = new Rect((int) e.tl().x,
(int) (e.tl().y + e.height * 0.4), (int) e.width,
(int) (e.height * 0.6));
mROI = mGray.submat(eye_only_rectangle);
Mat vyrez = mRgba.submat(eye_only_rectangle);
Core.MinMaxLocResult mmG = Core.minMaxLoc(mROI);
// Imgproc.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
Core.circle(vyrez, mmG.minLoc, 2, new Scalar(255, 255, 255, 255), 2);
iris.x = mmG.minLoc.x + eye_only_rectangle.x;
iris.y = mmG.minLoc.y + eye_only_rectangle.y;
eye_template = new Rect((int) iris.x - size / 2, (int) iris.y
- size / 2, size, size);
/*Imgproc.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);*/
Core.rectangle(mRgba, eye_template.tl(), eye_template.br(),
new Scalar(255, 0, 0, 255), 2);
template = (mGray.submat(eye_template)).clone();
return template;
}
return template;
}
public void onRecreateClick(View v)
{
learn_frames = 0;
}
}
Notice that the camera opens successfully for recording, and the XML files are loaded, but nothing happens after that. I made the window size 100 x 100 just for testing purposes; I know it should be 1 x 1.
Can anyone please tell me how to solve this problem? How can I run the OpenCV video camera for face and eye tracking from a background service?
I tried to get the OpenCV camera in a service as you are doing, but I was unable to get either the onCameraFrame or the onCameraViewStarted callback, which meant that the camera was not getting initialized. After a bunch of tries:
Setting the preview to INVISIBLE/GONE -> not working
Setting the preview size to a pixel size of 1×1 or respecting the camera's 4x3 aspect ratio -> not working
Setting the preview outside the screen -> not working
I found out that the OpenCV camera needs to be previewed at the view's size; only that way was I able to get the onCameraFrame callback.
Fortunately, I could place another element on top of the camera preview to hide it, and show the alarms only (a rough sketch of that idea is below).
You can find a simple CameraInService example here; hope it is useful for you.
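Roughly, that idea looks like the sketch below (not the linked example; the preview size, flags and overlay are assumptions):
// Give the OpenCV camera view a real (non-1x1) preview size so that
// onCameraViewStarted/onCameraFrame are delivered, then cover it with an
// opaque view so nothing is visible to the user.
JavaCameraView cameraView = new JavaCameraView(this, CameraBridgeViewBase.CAMERA_ID_FRONT);
cameraView.setCvCameraViewListener(this);
WindowManager.LayoutParams previewParams = new WindowManager.LayoutParams(
        640, 480,                                        // a real preview size, not 1x1
        WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
        WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE,
        PixelFormat.TRANSLUCENT);
windowManager.addView(cameraView, previewParams);
View cover = new View(this);                             // opaque element stacked on top of the preview
cover.setBackgroundColor(Color.BLACK);
windowManager.addView(cover, new WindowManager.LayoutParams(
        640, 480,
        WindowManager.LayoutParams.TYPE_SYSTEM_OVERLAY,
        WindowManager.LayoutParams.FLAG_NOT_FOCUSABLE,
        PixelFormat.OPAQUE));
cameraView.enableView();                                 // connect the camera and start the callbacks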

How to apply the grayscale effect to the Bitmap image?

I want to convert the bitmap image to grayscale. For that I am using the NDK to boost the app performance. I have applied the other effects successfully.
Problem ::
The code which I am using to apply the grayscale is taken from C#. So I want to convert it for the NDK, and I am unable to do that part.
For reference, I am putting up the code showing how I have applied the cyan effect to the image.
The code pasted below is working fine.
void applyCyano(Bitmap* bitmap) {
//Cache to local variables
unsigned char* red = (*bitmap).red;
unsigned char* green = (*bitmap).green;
unsigned char* blue = (*bitmap).blue;
unsigned int length = (*bitmap).width * (*bitmap).height;
register unsigned int i;
register unsigned char grey, r, g, b;
for (i = length; i--;) {
grey = ((red[i] * 0.222f) + (green[i] * 0.222f) + (blue[i] * 0.222f));
r = componentCeiling(61.0f + grey);
g = componentCeiling(87.0f + grey);
b = componentCeiling(136.0f + grey);
grey = blackAndWhite(red[i], green[i], blue[i]);
red[i] = overlayPixelComponents(grey, r, 0.9f);
green[i] = overlayPixelComponents(grey, g, 0.9f);
blue[i] = overlayPixelComponents(grey, b, 0.9f);
}
}
The code to apply the grayscale effect (taken from a C# example found over the net) ::
void applyGrayscaleNatively(Bitmap* original)
{
//create an empty bitmap the same size as original
Bitmap newBitmap = new Bitmap(original.Width, original.Height);
//lock the original bitmap in memory
BitmapData originalData = original.LockBits(
new Rectangle(0, 0, original.Width, original.Height),
ImageLockMode.ReadOnly, PixelFormat.Format24bppRgb);
//lock the new bitmap in memory
BitmapData newData = newBitmap.LockBits(
new Rectangle(0, 0, original.Width, original.Height),
ImageLockMode.WriteOnly, PixelFormat.Format24bppRgb);
//set the number of bytes per pixel
int pixelSize = 3;
for (int y = 0; y < original.Height; y++)
{
//get the data from the original image
byte* oRow = (byte*)originalData.Scan0 + (y * originalData.Stride);
//get the data from the new image
byte* nRow = (byte*)newData.Scan0 + (y * newData.Stride);
for (int x = 0; x < original.Width; x++)
{
//create the grayscale version
byte grayScale =
(byte)((oRow[x * pixelSize] * .11) + //B
(oRow[x * pixelSize + 1] * .59) + //G
(oRow[x * pixelSize + 2] * .3)); //R
//set the new image's pixel to the grayscale version
nRow[x * pixelSize] = grayScale; //B
nRow[x * pixelSize + 1] = grayScale; //G
nRow[x * pixelSize + 2] = grayScale; //R
}
}
//unlock the bitmaps
newBitmap.UnlockBits(newData);
original.UnlockBits(originalData);
}
What I want to do ::
I have taken this project from here, which applies different effects to the image but not grayscale. So how do I apply grayscale in the code so that all the other functionality of the project keeps working?
Let me know if you need anything from me.
Many thanks in advance.
Please help me to resolve this issue, as because of it I am not able to go further in my project.
Use the following function to convert a bitmap to its grayscale equivalent in Android, instead of converting the C# version:
public Bitmap toGrayscale(Bitmap bmpOriginal){
int width, height;
height = bmpOriginal.getHeight();
width = bmpOriginal.getWidth();
Bitmap bmpGrayscale = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
Canvas c = new Canvas(bmpGrayscale);
Paint paint = new Paint();
ColorMatrix cm = new ColorMatrix();
cm.setSaturation(0);
ColorMatrixColorFilter f = new ColorMatrixColorFilter(cm);
paint.setColorFilter(f);
c.drawBitmap(bmpOriginal, 0, 0, paint);
return bmpGrayscale;
}
I solved the issue ::
Here is my solution ...
void applyGrayscaleNatively(Bitmap* bitmap)
{
register unsigned int i;
unsigned int length = (*bitmap).width * (*bitmap).height;
register unsigned char grey;
unsigned char* red = (*bitmap).red;
unsigned char* green = (*bitmap).green;
unsigned char* blue = (*bitmap).blue;
float matrix[4][4];
identMatrix(matrix);
float saturation = 1.0f;
saturateMatrix(matrix, &saturation);
applyMatrix(bitmap, matrix);
for (i = length; i--;) {
float value;
getBrightness(red[i], green[i], blue[i], &value);
grey = grayScale(red[i], green[i], blue[i]);
red[i] = grey;
green[i] = grey;
blue[i] = grey;
}
}

Image Perspective Transform using Android OpenCV

I am making an Android app which can do a perspective transform on an image.
I want to do the same thing as the code below.
I tried, but I can't read the C code. Please advise me!
Thanks,
Shoichi
Sample code (what I want to do):
#include <cv.h>
#include <highgui.h>
int
main (int argc, char **argv)
{
IplImage *src_img = 0, *dst_img = 0;
CvMat *map_matrix;
CvPoint2D32f src_pnt[4], dst_pnt[4];
if (argc >= 2)
src_img = cvLoadImage (argv[1], CV_LOAD_IMAGE_ANYDEPTH | CV_LOAD_IMAGE_ANYCOLOR);
if (src_img == 0)
return -1;
dst_img = cvCloneImage (src_img);
src_pnt[0] = cvPoint2D32f (150.0, 150.0);
src_pnt[1] = cvPoint2D32f (150.0, 300.0);
src_pnt[2] = cvPoint2D32f (350.0, 300.0);
src_pnt[3] = cvPoint2D32f (350.0, 150.0);
dst_pnt[0] = cvPoint2D32f (200.0, 200.0);
dst_pnt[1] = cvPoint2D32f (150.0, 300.0);
dst_pnt[2] = cvPoint2D32f (350.0, 300.0);
dst_pnt[3] = cvPoint2D32f (300.0, 200.0);
map_matrix = cvCreateMat (3, 3, CV_32FC1);
cvGetPerspectiveTransform (src_pnt, dst_pnt, map_matrix);
cvWarpPerspective (src_img, dst_img, map_matrix, CV_INTER_LINEAR + CV_WARP_FILL_OUTLIERS, cvScalarAll (100));
cvNamedWindow ("src", CV_WINDOW_AUTOSIZE);
cvNamedWindow ("dst", CV_WINDOW_AUTOSIZE);
cvShowImage ("src", src_img);
cvShowImage ("dst", dst_img);
cvWaitKey (0);
cvDestroyWindow ("src");
cvDestroyWindow ("dst");
cvReleaseImage (&src_img);
cvReleaseImage (&dst_img);
cvReleaseMat (&map_matrix);
return 1;
}
My code:
@Override
public void onCreate(Bundle savedInstanceState) {
Log.i(TAG, "onCreate");
super.onCreate(savedInstanceState);
requestWindowFeature(Window.FEATURE_NO_TITLE);
setContentView(R.layout.main);
Resources r = getResources();
Bitmap inputBitmap = BitmapFactory.decodeResource(r, R.drawable.icon);
Mat inputMat = Utils.bitmapToMat(inputBitmap);
Mat outputMat = inputMat.clone();
List<Point> src_pnt = new ArrayList<Point>();
Point p0 = new Point(75.0, 75.0);
src_pnt.add(p0);
Point p1 = new Point(75.0, 100.0);
src_pnt.add(p1);
Point p2 = new Point(100.0, 100.0);
src_pnt.add(p2);
Point p3 = new Point(100.0, 75.0);
src_pnt.add(p3);
Mat startM = Converters.vector_Point2f_to_Mat(src_pnt);
List<Point> dst_pnt = new ArrayList<Point>();
Point p4 = new Point(75.0, 75.0);
dst_pnt.add(p4);
Point p5 = new Point(75.0, 100.0);
dst_pnt.add(p5);
Point p6 = new Point(100.0, 100.0);
dst_pnt.add(p6);
Point p7 = new Point(100.0, 75.0);
dst_pnt.add(p7);
Mat endM = Converters.vector_Point2f_to_Mat(dst_pnt);
Mat M = new Mat(3, 3, CvType.CV_32F);
Core.perspectiveTransform(startM, endM, M);
Size size = new Size(200.0, 200.0);
Scalar scalar = new Scalar(50.0);
Imgproc.warpPerspective(inputMat, outputMat, M, size, Imgproc.INTER_LINEAR + Imgproc.CV_WARP_FILL_OUTLIERS, Imgproc.BORDER_DEFAULT, scalar);
Bitmap outputBitmap = inputBitmap;
Utils.matToBitmap(outputMat, outputBitmap);
ImageView imageView1 = (ImageView) findViewById(R.id.imageView1);
imageView1.setImageBitmap(outputBitmap);
}
Your dst_pnt points are the same as src_pnt, so the transformation matrix will be the identity and will not change the image at all. Use some other points.
Secondly, I think the 4th argument to warpPerspective (size) should be the size of outputMat, so if you want a 200x200 image, instead of
Mat outputMat = inputMat.clone();
use
Mat outputMat = new Mat(200, 200, inputMat.type());
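Putting the two fixes together, a rough sketch (assuming a 200x200 output is wanted; it also builds the 3x3 matrix with Imgproc.getPerspectiveTransform instead of Core.perspectiveTransform, which transforms point sets rather than computing a warp matrix):
// Sketch of a corrected version: distinct destination points and
// getPerspectiveTransform to build the 3x3 warp matrix.
Mat startM = Converters.vector_Point2f_to_Mat(src_pnt);
List<Point> dst_pnt = new ArrayList<Point>();
dst_pnt.add(new Point(0.0, 0.0));
dst_pnt.add(new Point(0.0, 200.0));
dst_pnt.add(new Point(200.0, 200.0));
dst_pnt.add(new Point(200.0, 0.0));
Mat endM = Converters.vector_Point2f_to_Mat(dst_pnt);
Mat M = Imgproc.getPerspectiveTransform(startM, endM);
Mat outputMat = new Mat(200, 200, inputMat.type());
Imgproc.warpPerspective(inputMat, outputMat, M, new Size(200.0, 200.0), Imgproc.INTER_LINEAR);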
