Get lengths and angles of detected straight lines - Android

I am working on an Android app. I can detect lines using Canny and the Hough transform, but I don't know how to get the angles and lengths of the detected lines. Can you help, please? Here's the code:
public void countStraightLines(View view) {
try{
int iCannyLowerThreshold = 45;
int iCannyUpperThreshold = 75;
Mat rgba = Utils.loadResource(getApplicationContext(), res);
Bitmap bmp = Bitmap.createBitmap(rgba.width(), rgba.height(), Bitmap.Config.ARGB_8888);
Mat gray = new Mat ();
Imgproc.cvtColor(rgba, gray, Imgproc.COLOR_BGRA2GRAY, 4);
Imgproc.Canny(gray, gray, iCannyLowerThreshold, iCannyUpperThreshold);
Utils.matToBitmap(gray, bmp);
imgSource.setImageBitmap(bmp);
} catch (IOException e) {
Log.e(TAG, "ERROR Loading Mat");
e.printStackTrace();
}
}
@Override
public void onItemSelected(AdapterView<?> parent, View view, int position,
long id) {
Globals.pictSelected=parent.getItemAtPosition(position).toString();
res = getResources().getIdentifier(parent.getItemAtPosition(position).toString(), "drawable", this.getPackageName());
try {
Mat rgba = Utils.loadResource(getApplicationContext(), res);
Bitmap bmp = Bitmap.createBitmap(rgba.width(), rgba.height(), Bitmap.Config.ARGB_8888);
Mat gray = new Mat ();
Imgproc.cvtColor(rgba, gray, Imgproc.COLOR_BGRA2GRAY, 4);
int iCannyLowerThreshold = 45;
int iCannyUpperThreshold = 75;
int iHoughLinesThreshold = 50;
int iHoughLinesMinLineSize = 40;
int iHoughLinesGap = 20;
Imgproc.Canny(gray, gray, iCannyLowerThreshold, iCannyUpperThreshold);
Mat lines = new Mat();
Imgproc.HoughLinesP(gray, lines, 1, Math.PI/180, iHoughLinesThreshold, iHoughLinesMinLineSize, iHoughLinesGap);
int x = 0;
char s = 'N';
for (; x < Math.min(lines.cols(), 100); x++)
{
double[] vec = lines.get(0, x);
if (vec == null)
break;
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Core.line(rgba, start, end, new Scalar(255, 0, 0, 255), 1);
if (x >= 40){s = 'C';}
else {s = 'S';}
}
text.setText("Line Count: " + x + " and The Picture is " + s);
Utils.matToBitmap(rgba, bmp);
imgSource.setImageBitmap(bmp);
} catch (IOException e) {
Log.e(TAG, "ERROR Loading Mat");
e.printStackTrace();
}
}
Thank you!

Well, it looks like you have a start point and an end point - the length is just sqrt((starty-endy)^2 + (startx-endx)^2). For the angle of a single line, atan2(endy-starty, endx-startx) gives its angle relative to the x-axis directly. To get the angle between two lines, treat them as vectors A and B and use the fact that the dot product satisfies A.B = |A|*|B|*cos(alpha), where alpha is the angle between them, so alpha = arccos((A.B)/(|A|*|B|)).
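In Java terms, a minimal sketch for one segment returned by HoughLinesP, using the vec layout from the code above:
// Length and angle of one detected segment (x1, y1, x2, y2 as in the question).
double[] vec = lines.get(0, x);
double dx = vec[2] - vec[0];
double dy = vec[3] - vec[1];
double length = Math.hypot(dx, dy);                   // sqrt(dx*dx + dy*dy)
double angleDeg = Math.toDegrees(Math.atan2(dy, dx)); // vs. the x-axis, in (-180, 180]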

Related

Android: How to put object on particular location of screen using OpenCV?

Edited:
If anyone can suggest any link or post, I will be very grateful. I have been trying to find a solution for two days and I can't find any. Thank you in advance.
I am trying to put one object (an image) on a particular location of the screen using OpenCV in Android.
I have Points like this: "{680.0, 488.0}", i.e. an (x, y) coordinate,
so how can I find the corresponding location on my screen for putting the object?
Below is my code where I am getting the Point:
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
mRgba = inputFrame.rgba();
mGray = inputFrame.gray();
iThreshold = minTresholdSeekbar.getProgress();
//Imgproc.blur(mRgba, mRgba, new Size(5,5));
Imgproc.GaussianBlur(mRgba, mRgba, new org.opencv.core.Size(3, 3), 1, 1);
//Imgproc.medianBlur(mRgba, mRgba, 3);
if (!mIsColorSelected) return mRgba;
List<MatOfPoint> contours = mDetector.getContours();
mDetector.process(mRgba);
Log.d(TAG, "Contours count: " + contours.size());
if (contours.size() <= 0) {
return mRgba;
}
RotatedRect rect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(0).toArray()));
double boundWidth = rect.size.width;
double boundHeight = rect.size.height;
int boundPos = 0;
for (int i = 1; i < contours.size(); i++) {
rect = Imgproc.minAreaRect(new MatOfPoint2f(contours.get(i).toArray()));
if (rect.size.width * rect.size.height > boundWidth * boundHeight) {
boundWidth = rect.size.width;
boundHeight = rect.size.height;
boundPos = i;
}
}
Rect boundRect = Imgproc.boundingRect(new MatOfPoint(contours.get(boundPos).toArray()));
//Core/Imgproc.rectangle( mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR_WHITE, 2, 8, 0 );
Log.d(TAG,
" Row start [" +
(int) boundRect.tl().y + "] row end [" +
(int) boundRect.br().y + "] Col start [" +
(int) boundRect.tl().x + "] Col end [" +
(int) boundRect.br().x + "]");
int rectHeightThresh = 0;
double a = boundRect.br().y - boundRect.tl().y;
a = a * 0.7;
a = boundRect.tl().y + a;
Log.d(TAG,
" A [" + a + "] br y - tl y = [" + (boundRect.br().y - boundRect.tl().y) + "]");
//Core.rectangle( mRgba, boundRect.tl(), boundRect.br(), CONTOUR_COLOR, 2, 8, 0 );
//Core/Imgproc.rectangle( mRgba, boundRect.tl(), new Point(boundRect.br().x, a), CONTOUR_COLOR, 2, 8, 0 );
MatOfPoint2f pointMat = new MatOfPoint2f();
Imgproc.approxPolyDP(new MatOfPoint2f(contours.get(boundPos).toArray()), pointMat, 3, true);
contours.set(boundPos, new MatOfPoint(pointMat.toArray()));
MatOfInt hull = new MatOfInt();
MatOfInt4 convexDefect = new MatOfInt4();
Imgproc.convexHull(new MatOfPoint(contours.get(boundPos).toArray()), hull);
if (hull.toArray().length < 3) return mRgba;
Imgproc.convexityDefects(new MatOfPoint(contours.get(boundPos).toArray()), hull, convexDefect);
List<MatOfPoint> hullPoints = new LinkedList<MatOfPoint>();
List<Point> listPo = new LinkedList<Point>();
for (int j = 0; j < hull.toList().size(); j++) {
listPo.add(contours.get(boundPos).toList().get(hull.toList().get(j)));
}
MatOfPoint e = new MatOfPoint();
e.fromList(listPo);
hullPoints.add(e);
List<MatOfPoint> defectPoints = new LinkedList<MatOfPoint>();
List<Point> listPoDefect = new LinkedList<Point>();
for (int j = 0; j < convexDefect.toList().size(); j = j + 4) {
Point farPoint = contours.get(boundPos).toList().get(convexDefect.toList().get(j + 2));
Integer depth = convexDefect.toList().get(j + 3);
if (depth > iThreshold && farPoint.y < a) {
listPoDefect.add(contours.get(boundPos).toList().get(convexDefect.toList().get(j + 2)));
}
Log.d(TAG, "defects [" + j + "] " + convexDefect.toList().get(j + 3));
}
MatOfPoint e2 = new MatOfPoint();
e2.fromList(listPo);
defectPoints.add(e2);
Log.d(TAG, "hull: " + hull.toList());
Log.d(TAG, "defects: " + convexDefect.toList());
Imgproc.drawContours(mRgba, hullPoints, -1, CONTOUR_COLOR, 3);
int defectsTotal = (int) convexDefect.total();
Log.d(TAG, "Defect total " + defectsTotal);
this.numberOfFingers = listPoDefect.size();
if (this.numberOfFingers > 5) {
this.numberOfFingers = 5;
} /*else if (this.numberOfFingers == 1) {
this.numberOfFingers = 0;
}
*/
mHandler.post(mUpdateFingerCountResults);
runOnUiThread(new Runnable() {
@Override
public void run() {
ring.setVisibility(View.VISIBLE);
/*LinearLayout.LayoutParams parms = new LinearLayout.LayoutParams(10,10);
ring.setLayoutParams(parms);*/
}
});
for (Point p : listPoDefect) {
Log.e("Points", p.toString());
// Imgproc.circle(mRgba, p, 6, new Scalar(255,0,255));
}
return mRgba;
}
Below is the method I used to save and display the image. Now I need to put a ring on one of the captured hand's fingers.
private void saveImage() {
if (MainActivity.listPoDefect.size() >= 5) {
mIsColorSelected = false;
if (listPoDefect.size() != 0) {
for (Point p :listPoDefect) {
Log.d(TAG, "before sorting X =" + String.valueOf(p.x) + " Y = " + String.valueOf(p.y));
}
Collections.sort(listPoDefect, new Comparator<Point>() {
public int compare(Point o1, Point o2) {
return Double.compare(o1.x, o2.x);
}
});
Log.d(TAG, "After Sorting ");
for (Point p : listPoDefect) {
Log.d(TAG, "after sorting X =" + String.valueOf(p.x) + " Y = " + String.valueOf(p.y));
}
}
mIsColorSelected = false;
Bitmap bitmap5 = Bitmap.createBitmap(mRgbaWithoutLine.cols(), mRgbaWithoutLine.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mRgbaWithoutLine, bitmap5);
bitmap = bitmap5;
//Create a new image bitmap and attach a brand new canvas to it
Bitmap tempBitmap = Bitmap.createBitmap(bitmap5.getWidth(), bitmap5.getHeight(), Bitmap.Config.RGB_565);
Canvas tempCanvas = new Canvas(tempBitmap);
//Draw the image bitmap into the cavas
tempCanvas.drawBitmap(bitmap5, 0, 0, null);
double scaledWidth = bitmap5.getWidth();
double scaledHeight = bitmap5.getHeight();
double xScaleFactor = scaledWidth / bitmap5.getWidth();
double yScaleFactor = scaledHeight / bitmap5.getHeight();
Paint myRectPaint = new Paint();
myRectPaint.setStyle(Paint.Style.STROKE);
myRectPaint.setColor(Color.RED);
myRectPaint.setStrokeWidth(5);
myRectPaint.setAntiAlias(true);
//this is zeroth position manipulation
double differenceX= listPoDefect.get(2).x-listPoDefect.get(1).x;
double differenceY= listPoDefect.get(2).y-listPoDefect.get(1).y;
double zeroPostionX=listPoDefect.get(1).x-differenceX;
double zeroPostionY=listPoDefect.get(1).y-differenceY;
Point pointZeroths=listPoDefect.get(0);
Point pointNew=new Point(zeroPostionX,zeroPostionY);
listPoDefect.remove(0);
listPoDefect.add(0,pointNew);
double thirdPostionX=listPoDefect.get(2).x+differenceX;
double thirdPostionY=listPoDefect.get(2).y+differenceY;
Point thirdpointNew=new Point(thirdPostionX,thirdPostionY);
listPoDefect.remove(3);
listPoDefect.add(3,thirdpointNew);
// Point pointNewThird=new Point(pointthird.x+differenc,pointthird.y);
// HomeActivity.listPoDefect.remove(3);
// HomeActivity.listPoDefect.add(3,pointNewThird);
Paint paint_text = new Paint();
paint_text.setColor(Color.WHITE);
paint_text.setStyle(Paint.Style.FILL);
paint_text.setTextSize(30);
for (int row = 0; row < 4; row++) { // draw 2 rows
Point point1 = null;
point1 = listPoDefect.get(row);
android.graphics.Point canvas_point1 = new android.graphics.Point((int) ((point1.x * xScaleFactor)), (int) ((point1.y * yScaleFactor)));
Log.d(TAG, "after sorting canvas_point1 ="+"Raw ="+row +" " + String.valueOf(canvas_point1.x) + " Y = " + String.valueOf(canvas_point1.y));
Log.d(TAG, "====================================================================================================");
if(pointFListGraphies.size()!=4)
{
pointFListGraphies.add(new PointF(canvas_point1));
}
// tempCanvas.drawRect(canvas_point1.x, canvas_point1.y, canvas_point1.x + 130, canvas_point1.y + 50, myRectPaint);
// tempCanvas.drawText(String.valueOf(row+"-"+canvas_point1.x), canvas_point1.x, canvas_point1.y, paint_text);
}
Log.d(TAG, "====================================================================================================");
for (int row = 0; row < pointFListGraphies.size(); row++) { // draw 2 rows
PointF point1 = null;
point1 = pointFListGraphies.get(row);
Log.d(TAG, "=========pointF X="+point1.x +"poninF Y =" +point1.y);
}
tempbitmap = tempBitmap;
handImage.setVisibility(View.VISIBLE);
handImage.setImageBitmap(tempbitmap);
/* Bitmap src = BitmapFactory.decodeResource(getResources(), R.drawable.ring);
tempCanvas.drawBitmap();*/
onCameraViewStopped();
//finish();
}
else {
}
}
Can anyone help me?
Thanks in advance.
OpenCV has a number of functions for writing on an image, which is usually what you are displaying on the screen.
For example, the function to write text at a particular location is:
void cv::putText ( InputOutputArray img,
const String & text,
Point org,
int fontFace,
double fontScale,
Scalar color,
int thickness = 1,
int lineType = LINE_8,
bool bottomLeftOrigin = false
)
The parameters are:
img Image.
text Text string to be drawn.
org Bottom-left corner of the text string in the image.
fontFace Font type, see cv::HersheyFonts.
fontScale Font scale factor that is multiplied by the font-specific base size.
color Text color.
thickness Thickness of the lines used to draw a text.
lineType Line type. See the line for details.
bottomLeftOrigin When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.
You can find an overview of the drawing function here: https://docs.opencv.org/3.1.0/dc/da5/tutorial_py_drawing_functions.html
In your case, as long as your image covers the whole screen, this lets you place the text, object, or whatever you want to draw exactly where you want it to appear.
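For example, a minimal Java sketch (assuming the OpenCV 3.x Android bindings, where putText lives in Imgproc; in 2.4.x it was Core.putText) that marks and labels the point from the question; the ringMat overlay Mat is hypothetical:
// Mark the detected point and label it.
Point location = new Point(680.0, 488.0);
Imgproc.circle(mRgba, location, 10, new Scalar(0, 255, 0, 255), 2);
Imgproc.putText(mRgba, "ring here", location,
        Core.FONT_HERSHEY_SIMPLEX, 1.0, new Scalar(255, 255, 255, 255), 2);
// To paste a small image such as the ring instead of text, one option is to
// copy it into a submat at that location (ringMat is hypothetical; check the
// rectangle stays inside mRgba first):
Mat roi = mRgba.submat(new Rect((int) location.x, (int) location.y,
        ringMat.cols(), ringMat.rows()));
ringMat.copyTo(roi);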

CV exception in Android

This is the exception I'm getting:
CvException [org.opencv.core.CvException: cv::Exception: /hdd2/buildbot/slaves/slave_ardbeg1/50-SDK/opencv/modules/imgproc/src/hough.cpp:712: error: (-5) The source image must be 8-bit, single-channel in function CvSeq* cvHoughLines2(CvArr*, void*, int, double, double, int, double, double)
mat = new Mat();
edges = new Mat();
lines = new Mat();
mRgba = new Mat(612, 816, CvType.CV_8UC1);
Utils.bitmapToMat(bitmap, mat);
Imgproc.Canny(mat, edges, 50, 90);
int threshold = 50;
int minLineSize = 20;
int lineGap = 20;
try {
Imgproc.HoughLines(mat, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);
for (int x = 0; x < lines.cols(); x++) {
double[] vec = lines.get(0, x);
double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
Core.line(mRgba, start, end, new Scalar(255, 0, 0), 3);
}
Bitmap bmp = Bitmap.createBitmap(mRgba.cols(), mRgba.rows(), Bitmap.Config.ARGB_8888);
Utils.matToBitmap(mRgba, bmp);
bitmap = bmp;
} catch (Exception e) {
e.printStackTrace();
System.out.println("e = " + e);
}
Your image in the HoughLines call isn't right: the error says the source must be an 8-bit, single-channel image, but you're passing the 4-channel mat from bitmapToMat instead of the Canny output edges.
Try preparing the image like this:
https://stackoverflow.com/a/7975315/5577679
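Concretely, in your code the Canny output edges is already the 8-bit, single-channel image the error asks for; it just never reaches the Hough call. A minimal sketch of the fix (note that the threshold/minLineSize/lineGap arguments belong to HoughLinesP, not HoughLines):
Imgproc.Canny(mat, edges, 50, 90);
// Pass the single-channel Canny output, not the 4-channel bitmap Mat,
// and use HoughLinesP, the variant that takes min line size and gap.
Imgproc.HoughLinesP(edges, lines, 1, Math.PI / 180, threshold, minLineSize, lineGap);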

How to detect (Count) Hair from image using OpenCV?

I have tried the code below, using the OpenCV functions cvtColor, Canny and HoughLinesP, but I am not able to get an accurate result, and it does not work in some cases.
private boolean opencvProcessCount(Uri picFileUri) {
hairCount = 0;
totalC = 0;
//Log.e(">>>>>>>>","count " + picFileUri);
try {
InputStream iStream = getContentResolver().openInputStream(picFileUri);
byte[] im = getBytes(iStream);
BitmapFactory.Options opt = new BitmapFactory.Options();
opt.inDither = true;
opt.inPreferredConfig = Bitmap.Config.ARGB_8888;
Bitmap image = BitmapFactory.decodeByteArray(im, 0, im.length);
Mat mYuv = new Mat();
Utils.bitmapToMat(image, mYuv);
Mat mRgba = new Mat();
Imgproc.cvtColor(mYuv, mRgba, Imgproc.COLOR_RGB2GRAY, 4);
Imgproc.Canny(mRgba, mRgba, 80, 90);
Mat lines = new Mat();
int threshold = 80;
int minLineSize = 30;
int lineGap = 100;
Imgproc.HoughLinesP(mRgba, lines, 1, Math.PI/180, threshold, minLineSize, lineGap);
for (int x = 0; x < lines.rows(); x++)
{
double[] vec = lines.get(x, 0);
double x1 = vec[0],
y1 = vec[1],
x2 = vec[2],
y2 = vec[3];
Point start = new Point(x1, y1);
Point end = new Point(x2, y2);
double dx = x1 - x2;
double dy = y1 - y2;
double dist = Math.sqrt (dx*dx + dy*dy);
totalC ++;
Log.e(">>>>>>>>","dist " + dist);
if(dist>300.d)
{
hairCount ++;
// Log.e(">>>>>>>>","count " + x);
Imgproc.line(mRgba, start, end, new Scalar(0,255, 0, 255),5);// here initimg is the original image.
}// show those lines that have length greater than 300
}
Log.e(">>>>>>>>",totalC+" out hairCount " + hairCount);
// Imgproc.
} catch (Throwable e) {
// Log.e(">>>>>>>>","count " + e.getMessage());
e.printStackTrace();
}
return false;
}
Below are sample images of hair to count:
I think you will find this article interesting:
http://www.cs.ubc.ca/~lowe/papers/aij87.pdf
They take a 2D bitmap, apply a Canny edge detector, and then regroup segments of the different edges based on how likely they are to belong to the same object - in this case hair - and give criteria for such regrouping.
I think you could use this to find out how many objects there are in the image, and if the image contains only hair, then you'd have a count of the hairs.

Android - Template Matching

I want to create an Android application. The program steps are below:
Open camera
Get frames
Select a frame by touch screen
Load template image under drawable folder
Apply template matching
Show result
The Mat object of the template image is not empty; I checked it. When I run this code, I get the error message below.
Code :
public void onCameraViewStarted(int width, int height) {
mRgba = new Mat(height, width, CvType.CV_8UC4);
temp = new Mat(height, width, CvType.CV_8UC4);
}
public boolean onTouch(View v, MotionEvent event) {
int cols = mRgba.cols();
int rows = mRgba.rows();
int xOffset = (mOpenCvCameraView.getWidth() - cols) / 2;
int yOffset = (mOpenCvCameraView.getHeight() - rows) / 2;
int x = (int)event.getX() - xOffset;
int y = (int)event.getY() - yOffset;
Log.i(TAG, "Touch image coordinates: (" + x + ", " + y + ")");
if ((x < 0) || (y < 0) || (x > cols) || (y > rows)) return false;
mIsColorSelected = true;
return true; // don't need subsequent touch events
}
private static Mat readInputStreamIntoMat(InputStream inputStream) throws IOException {
// Read into byte-array
byte[] temporaryImageInMemory = readStream(inputStream);
// Decode into mat. Use any IMREAD_ option that describes your image appropriately
Mat outputImage = Highgui.imdecode(new MatOfByte(temporaryImageInMemory), Highgui.IMREAD_GRAYSCALE);
return outputImage;
}
private static byte[] readStream(InputStream stream) throws IOException {
// Copy content of the image to byte-array
ByteArrayOutputStream buffer = new ByteArrayOutputStream();
int nRead;
byte[] data = new byte[16384];
while ((nRead = stream.read(data, 0, data.length)) != -1) {
buffer.write(data, 0, nRead);
}
buffer.flush();
byte[] temporaryImageInMemory = buffer.toByteArray();
buffer.close();
stream.close();
return temporaryImageInMemory;
}
public Mat onCameraFrame(CvCameraViewFrame inputFrame) {
if(mIsColorSelected) {
InputStream inpT = getResources().openRawResource(R.drawable.imgt);
Mat mTemp;
try {
mRgba.copyTo(temp);
mTemp = readInputStreamIntoMat(inpT);
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
// / Create the result matrix
int result_cols = temp.cols() - mTemp.cols() + 1;
int result_rows = temp.rows() - mTemp.rows() + 1;
Mat result = new Mat(result_rows, result_cols, CvType.CV_32FC1);
int match_method = 4;
// / Do the Matching and Normalize
Imgproc.matchTemplate(temp, mTemp, result, match_method);
Core.normalize(result, result, 0, 1, Core.NORM_MINMAX, -1, new Mat());
/*
Localizing the best match with minMaxLoc
MinMaxLocResult mmr = Core.minMaxLoc(result);
Point matchLoc;
if (match_method == Imgproc.TM_SQDIFF || match_method == Imgproc.TM_SQDIFF_NORMED) {
matchLoc = mmr.minLoc;
} else {
matchLoc = mmr.maxLoc;
}/*
// / Show me what you got
Core.rectangle(temp, matchLoc, new Point(matchLoc.x + mTemp.cols(),
matchLoc.y + mTemp.rows()), new Scalar(0, 255, 0));*/
return temp;
}
else {
mRgba = inputFrame.rgba();
}
return mRgba;
}
For template matching, both the source image and the template image must have the same data type. Here your template image (mTemp) is a grayscale image while your source image (mRgba / temp) is a color image with an alpha channel.
So, let's change both source and template images to grayscale:
temp = new Mat(height, width, CvType.CV_8UC1);
and replace mRgba.copyTo(temp) with
Imgproc.cvtColor(mRgba, temp, Imgproc.COLOR_RGBA2GRAY);
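With both Mats grayscale, the localization block that is commented out in the question should then work largely as written; a sketch (2.4-era bindings, matching the Core calls already in the question):
// Locate the best match and draw a box around it.
Core.MinMaxLocResult mmr = Core.minMaxLoc(result);
// For the TM_SQDIFF variants the best match is the minimum; otherwise the maximum.
Point matchLoc = (match_method == Imgproc.TM_SQDIFF
        || match_method == Imgproc.TM_SQDIFF_NORMED) ? mmr.minLoc : mmr.maxLoc;
Core.rectangle(temp, matchLoc,
        new Point(matchLoc.x + mTemp.cols(), matchLoc.y + mTemp.rows()),
        new Scalar(255), 2); // temp is single-channel now, so a one-value Scalar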

Full bitmap is not being populated with pixel data after parallelization.

I have an app that processes a bitmap with a spherize distortion. You can touch the screen and set the radius of a circle that will contain the distortion. Once the distort button is pressed, a subset bitmap is created at the size of that circle and sent for processing. Once the subset is distorted, it is put back on the original bitmap as an overlay using the x,y coords from the original touch event.
Everything works fine except that the last line of pixels (across the bottom) of the subset bitmap is not populated with pixel data, so it looks like there is a black line at the bottom of the subset bitmap. The distortion class uses parallel programming: it checks the hardware at runtime to find out how many processors are available and splits the bitmap up across them accordingly. I've had help with the parallelization and I'm not sure how to find out why the black line is present. The looping seems to be in order; any ideas? Thanks in advance, Matt.
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.FutureTask;
import android.graphics.Bitmap;
import android.os.Debug;
import android.util.Log;
public class MultiRuntimeProcessorFilter {
private static final String TAG = "mrpf";
private int x = 0;
private Bitmap input = null;
private int radius;
public void createBitmapSections(int nOp, int[] sections){
int processors = nOp;
int jMax = input.getHeight();
int aSectionSize = (int) Math.ceil(jMax/processors);
Log.e(TAG, "++++++++++ sections size = "+aSectionSize);
int k = 0;
for(int h=0; h<processors+1; h++){
sections[h] = k;
k+= aSectionSize;
}
}// end of createBitmapSections()
@SuppressWarnings("unchecked")
public Bitmap barrel (Bitmap input, float k, int r){
this.radius = r;
this.input = input;
int []arr = new int[input.getWidth()*input.getHeight()];
int nrOfProcessors = Runtime.getRuntime().availableProcessors();
int[] sections = new int[nrOfProcessors+1];
createBitmapSections(nrOfProcessors,sections);
ExecutorService threadPool = Executors.newFixedThreadPool(nrOfProcessors);
for(int g=0; g<sections.length;g++){
Log.e(TAG, "++++++++++ sections= "+sections[g]);
}
// ExecutorService threadPool = Executors.newFixedThreadPool(nrOfProcessors);
Object[] task = new Object[nrOfProcessors];
for(int z = 0; z < nrOfProcessors; z++){
task[z] = (FutureTask<PartialResult>) threadPool.submit(new PartialProcessing(sections[z], sections[z+1] - 1, input, k));
Log.e(TAG, "++++++++++ task"+z+"= "+task[z].toString());
}
PartialResult[] results = new PartialResult[nrOfProcessors];
try{
for(int t = 0; t < nrOfProcessors; t++){
results[t] = ((FutureTask<PartialResult>) task[t]).get();
results[t].fill(arr);
}
}catch(Exception e){
e.printStackTrace();
}
Bitmap dst2 = Bitmap.createBitmap(arr,input.getWidth(),input.getHeight(),input.getConfig());
return dst2;
}//end of barrel()
public class PartialResult {
int startP;
int endP;
int[] storedValues;
public PartialResult(int startp, int endp, Bitmap input){
this.startP = startp;
this.endP = endp;
this.storedValues = new int[input.getWidth()*input.getHeight()];
}
public void addValue(int p, int result) {
storedValues[p] = result;
}
public void fill(int[] arr) {
for (int p = startP; p < endP; p++){
for(int b=0;b<radius;b++,x++)
arr[x] = storedValues[x];
}
Log.e(TAG, "++++++++++ x ="+x);
}
}//end of partialResult
public class PartialProcessing implements Callable<PartialResult> {
int startJ;
int endJ;
private int[] scalar;
private float xscale;
private float yscale;
private float xshift;
private float yshift;
private float thresh = 1;
private int [] s1;
private int [] s2;
private int [] s3;
private int [] s4;
private int [] s;
private Bitmap input;
private float k;
public PartialProcessing(int startj, int endj, Bitmap input, float k) {
this.startJ = startj;
this.endJ = endj;
this.input = input;
this.k = k;
s = new int[4];
scalar = new int[4];
s1 = new int[4];
s2 = new int[4];
s3 = new int[4];
s4 = new int[4];
}
int [] getARGB(Bitmap buf,int x, int y){
int rgb = buf.getPixel(y, x); // Returns by default ARGB.
// int [] scalar = new int[4];
// scalar[0] = (rgb >>> 24) & 0xFF;
scalar[1] = (rgb >>> 16) & 0xFF;
scalar[2] = (rgb >>> 8) & 0xFF;
scalar[3] = (rgb >>> 0) & 0xFF;
return scalar;
}
float getRadialX(float x,float y,float cx,float cy,float k){
x = (x*xscale+xshift);
y = (y*yscale+yshift);
float res = x+((x-cx)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
return res;
}
float getRadialY(float x,float y,float cx,float cy,float k){
x = (x*xscale+xshift);
y = (y*yscale+yshift);
float res = y+((y-cy)*k*((x-cx)*(x-cx)+(y-cy)*(y-cy)));
return res;
}
float calc_shift(float x1,float x2,float cx,float k){
float x3 = (float)(x1+(x2-x1)*0.5);
float res1 = x1+((x1-cx)*k*((x1-cx)*(x1-cx)));
float res3 = x3+((x3-cx)*k*((x3-cx)*(x3-cx)));
if(res1>-thresh && res1 < thresh)
return x1;
if(res3<0){
return calc_shift(x3,x2,cx,k);
}
else{
return calc_shift(x1,x3,cx,k);
}
}
void sampleImage(Bitmap arr, float idx0, float idx1)
{
// s = new int [4];
if(idx0<0 || idx1<0 || idx0>(arr.getHeight()-1) || idx1>(arr.getWidth()-1)){
s[0]=0;
s[1]=0;
s[2]=0;
s[3]=0;
return;
}
float idx0_fl=(float) Math.floor(idx0);
float idx0_cl=(float) Math.ceil(idx0);
float idx1_fl=(float) Math.floor(idx1);
float idx1_cl=(float) Math.ceil(idx1);
s1 = getARGB(arr,(int)idx0_fl,(int)idx1_fl);
s2 = getARGB(arr,(int)idx0_fl,(int)idx1_cl);
s3 = getARGB(arr,(int)idx0_cl,(int)idx1_cl);
s4 = getARGB(arr,(int)idx0_cl,(int)idx1_fl);
float x = idx0 - idx0_fl;
float y = idx1 - idx1_fl;
// s[0]= (int) (s1[0]*(1-x)*(1-y) + s2[0]*(1-x)*y + s3[0]*x*y + s4[0]*x*(1-y));
s[1]= (int) (s1[1]*(1-x)*(1-y) + s2[1]*(1-x)*y + s3[1]*x*y + s4[1]*x*(1-y));
s[2]= (int) (s1[2]*(1-x)*(1-y) + s2[2]*(1-x)*y + s3[2]*x*y + s4[2]*x*(1-y));
s[3]= (int) (s1[3]*(1-x)*(1-y) + s2[3]*(1-x)*y + s3[3]*x*y + s4[3]*x*(1-y));
}
@Override public PartialResult call() {
PartialResult partialResult = new PartialResult(startJ, endJ,input);
float centerX=input.getWidth()/2; //center of distortion
float centerY=input.getHeight()/2;
int width = input.getWidth(); //image bounds
int height = input.getHeight();
xshift = calc_shift(0,centerX-1,centerX,k);
float newcenterX = width-centerX;
float xshift_2 = calc_shift(0,newcenterX-1,newcenterX,k);
yshift = calc_shift(0,centerY-1,centerY,k);
float newcenterY = height-centerY;
float yshift_2 = calc_shift(0,newcenterY-1,newcenterY,k);
xscale = (width-xshift-xshift_2)/width;
yscale = (height-yshift-yshift_2)/height;
int p = startJ*radius;
int origPixel = 0;
int color = 0;
int i;
for (int j = startJ; j < endJ; j++){
for ( i = 0; i < width; i++, p++){
origPixel = input.getPixel(i,j);
float x = getRadialX((float)j,(float)i,centerX,centerY,k);
float y = getRadialY((float)j,(float)i,centerX,centerY,k);
sampleImage(input,x,y);
color = ((s[1]&0x0ff)<<16)|((s[2]&0x0ff)<<8)|(s[3]&0x0ff);
//Log.e(TAG, "radius = "+radius);
if(((i-centerX)*(i-centerX) + (j-centerY)*(j-centerY)) <= radius*(radius/4)){
partialResult.addValue(p, color);
}else{
partialResult.addValue(p, origPixel);
}
}//end of inner for
}//end of outer for
return partialResult;
}//end of call
}// end of partialprocessing
}//end of MultiProcesorFilter
[update] I'll post the view class that calls the barrel method. This class gets the touch events and sets the radius of the distortion prior to processing, so you can see more of how everything is set up before the distortion is applied.
public class TouchView extends View{
private File tempFile;
private byte[] imageArray;
private Bitmap bgr;
private Bitmap crop;
private Bitmap crop2;
private Bitmap overLay;
private Bitmap overLay2;
private Paint pTouch;
private float centreX;
private float centreY;
private float centreA;
private float centreB;
private Boolean xyFound = false;
private Boolean abFound = false;
private int Progress = 1;
private static final String TAG = "*********TouchView";
private Filters f = null;
private Filters f2 = null;
private boolean bothCirclesInPlace = false;
private MultiProcessorFilter mpf;
private MultiProcessorFilter mpf2;
private MultiRuntimeProcessorFilter mrpf;
private MultiRuntimeProcessorFilter mrpf2;
private int radius = 50;
protected boolean isLocked = false;
protected boolean isSaved = false;
protected byte [] data;
private float distance1;
private float distance2;
public TouchView(Context context) {
super(context);
}
public TouchView(Context context, AttributeSet attr) {
super(context,attr);
Log.e(TAG, "++++++++++ inside touchview constructor");
tempFile = new File(Environment.getExternalStorageDirectory().
getAbsolutePath() + "/"+"image.jpeg");
imageArray = new byte[(int)tempFile.length()];
// new Thread(new Runnable() {
// public void run() {
try{
InputStream is = new FileInputStream(tempFile);
BufferedInputStream bis = new BufferedInputStream(is);
DataInputStream dis = new DataInputStream(bis);
int i = 0;
while (dis.available() > 0 ) {
imageArray[i] = dis.readByte();
i++;
}
dis.close();
} catch (Exception e) {
e.printStackTrace();
}
// }
// }).start();
Bitmap bm = BitmapFactory.decodeByteArray(imageArray, 0, imageArray.length);
if(bm == null){
Log.e(TAG, "bm = null");
}else{
Log.e(TAG, "bm = not null");
}
bgr = bm.copy(bm.getConfig(), true);
overLay = null;
overLay2 = null;
bm.recycle();
pTouch = new Paint(Paint.ANTI_ALIAS_FLAG);
// pTouch.setXfermode(new PorterDuffXfermode(Mode.SRC_OUT));
pTouch.setColor(Color.RED);
pTouch.setStyle(Paint.Style.STROKE);
}// end of touchView constructor
public void findCirclePixels(){
//f = new Filters();
// f2 = new Filters();
//mpf = new MultiProcessorFilter();
//mpf2 = new MultiProcessorFilter();
mrpf = new MultiRuntimeProcessorFilter();
mrpf2 = new MultiRuntimeProcessorFilter();
crop = Bitmap.createBitmap(bgr,Math.max((int)centreX-radius,0),Math.max((int)centreY-radius,0),radius*2,radius*2);
crop2 = Bitmap.createBitmap(bgr,Math.max((int)centreA-radius,0),Math.max((int)centreB-radius,0),radius*2,radius*2);
new Thread(new Runnable() {
public void run() {
float prog = (float)Progress/150001;
// final Bitmap bgr3 = f.barrel(crop,prog);
// final Bitmap bgr4 = f2.barrel(crop2,prog);
//final Bitmap bgr3 = mpf.barrel(crop,prog);
// final Bitmap bgr4 = mpf2.barrel(crop2,prog);
final Bitmap bgr3 = mrpf.barrel(crop,prog,radius*2);
final Bitmap bgr4 = mrpf2.barrel(crop2,prog, radius*2);
TouchView.this.post(new Runnable() {
public void run() {
TouchView.this.overLay = bgr3;
TouchView.this.overLay2 = bgr4;
TouchView.this.invalidate();
}
});
}
}).start();
}// end of findCirclePixels()
@Override
public boolean onTouchEvent(MotionEvent ev) {
switch (ev.getAction()) {
case MotionEvent.ACTION_DOWN: {
int w = getResources().getDisplayMetrics().widthPixels;
int h = getResources().getDisplayMetrics().heightPixels;
if(ev.getX() <radius || ev.getX() > w - radius ){
// Log.e(TAG, "touch event is too near width edge!!!!!!!!!!");
showToastMessage("You touched too near the screen edge");
break;
}
if(ev.getY() <radius || ev.getY() > h - radius ){
// Log.e(TAG, "touch event is too near height edge!!!!!!!!!!");
showToastMessage("You touched too near the screen edge");
break;
}
distance1 = (float) Math.sqrt(Math.pow(ev.getX() - centreX, 2.0) + Math.pow(ev.getY() - centreY, 2.0));
distance2 = (float) Math.sqrt(Math.pow(ev.getX() - centreA, 2.0) + Math.pow(ev.getY() - centreB, 2.0));
Log.e(TAG, "dist1 = "+distance1 +" distance2 = " + distance2);
if(isLocked == false){
if(abFound == false){
centreA = (int) ev.getX();
centreB = (int) ev.getY();
abFound = true;
invalidate();
}
if(xyFound == false){
centreX = (int) ev.getX();
centreY = (int) ev.getY();
xyFound = true;
invalidate();
}
if(abFound == true && xyFound == true){
bothCirclesInPlace = true;
}
break;
}
}
case MotionEvent.ACTION_MOVE: {
if(isLocked == false){
/*if(xyFound == false){
centreX = (int) ev.getX()-70;
centreY = (int) ev.getY()-70;
xyFound = true;
}else{
centreA = (int) ev.getX()-70;
centreB = (int) ev.getY()-70;
bothCirclesInPlace = true;
invalidate();
}
*/
if(distance1 < distance2){
centreX = (int) ev.getX();
centreY = (int) ev.getY();
xyFound = true;
invalidate();
}else{
centreA = (int) ev.getX();
centreB = (int) ev.getY();
bothCirclesInPlace = true;
invalidate();
}
break;
}
}
case MotionEvent.ACTION_UP:
break;
}
return true;
}//end of onTouchEvent
public void initSlider(final HorizontalSlider slider)
{
slider.setOnProgressChangeListener(changeListener);
}
private OnProgressChangeListener changeListener = new OnProgressChangeListener() {
@Override
public void onProgressChanged(View v, int progress) {
if(isLocked == true){
setProgress(progress);
}else{
Toast.makeText(TouchView.this.getContext(), "press lock before applying distortion ", Toast.LENGTH_SHORT).show();
}
}
};
@Override
public void onDraw(Canvas canvas){
super.onDraw(canvas);
Log.e(TAG, "******about to draw bgr ");
canvas.drawBitmap(bgr, 0, 0, null);
if(isSaved == false){
if (isLocked == true && bothCirclesInPlace == true){
if(overLay != null)
canvas.drawBitmap(overLay, centreX-radius, centreY-radius, null);
if(overLay2 != null)
canvas.drawBitmap(overLay2, centreA-radius, centreB-radius, null);
}
if(bothCirclesInPlace == true && isLocked == false){
canvas.drawCircle(centreX, centreY, radius,pTouch);
canvas.drawCircle(centreA, centreB, radius,pTouch);
}
}else{
// String mFilePath : Absolute Path of the file to be saved
// Bitmap mBitmap1 : First bitmap. This goes as background.
// Bitmap mCBitmap : Bitmap associated with the Canvas. All draws on the canvas are drawn into this bitmap.
// Bitmap mBitmap2 : Second bitmap. This goes on top of first (in this example serves as foreground.
// Paint mPaint1 : Paint to draw first bitmap
// Paint mPaint2 : Paint to draw second bitmap on top of first bitmap
isSaved = false;
Bitmap mCBitmap = Bitmap.createBitmap(bgr.getWidth(), bgr.getHeight(), bgr.getConfig());
Canvas tCanvas = new Canvas(mCBitmap);
tCanvas.drawBitmap(bgr, 0, 0, null);
if(overLay != null)
tCanvas.drawBitmap(overLay, centreX-radius, centreY-radius, null);
if(overLay2 != null)
tCanvas.drawBitmap(overLay2, centreA-radius, centreB-radius, null);
canvas.drawBitmap(mCBitmap, 0, 0, null);
ByteArrayOutputStream bos = new ByteArrayOutputStream();
mCBitmap.compress(CompressFormat.JPEG, 100 /*ignored for PNG*/, bos);
data = bos.toByteArray();
try {
bos.flush();
bos.close();
} catch (IOException e1) {
// TODO Auto-generated catch block
e1.printStackTrace();
}
try {
bos.flush();
bos.close();
} catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
if ( data == null){
Log.e(TAG, "data in touchview before save clicked is null");
}else{
Log.e(TAG, "data in touchview before saved clicked is not null");
}
}
}//end of onDraw
protected void setProgress(int progress2) {
Log.e(TAG, "***********in SETPROGRESS");
this.Progress = progress2;
findCirclePixels();
}
public int getRadius() {
return radius;
}
public void setRadius(int r) {
radius = r;
invalidate();
}
public void showToastMessage(String mess){
Toast.makeText(TouchView.this.getContext(), mess.toString(), Toast.LENGTH_SHORT).show();
}
}
My guess would be that when the bottom of the image is processed, the operation works partially on the input image and partially outside of it, due to the radius in your barrel method. Edges often cause issues when an operation reads outside the bounds of the actual image, giving 0 as a result, which can cause a black line...
I suggest trying to increase the size of your image:
@SuppressWarnings("unchecked")
public Bitmap barrel (Bitmap input, float k, int r){
this.radius = r;
this.input = input;
// Add an offset to the width and height equal to the radius
// To avoid performing processing outside the bounds of the input image
int []arr = new int[(input.getWidth() + this.radius) * (input.getHeight() + this.radius)];
// Continue...
Again, this is my first guess and I have no time to check right now, but investigating the edges first would be my recommendation.
Just a guess: what happens if you put this?
BitmapDrawable bmpd = new BitmapDrawable(input);
int []arr = new int[(bmpd.getIntrinsicWidth() + this.radius) * (bmpd.getIntrinsicHeight() + this.radius)];
Your problem most likely has to do with the assumed coordinate system of the image versus that of the spherize algorithm.
See MathWorks Image Coordinate Systems
I expect that you are treating your input/output images according to the pixel-indices method, but the spherize algorithm is processing your data using the spatial coordinate system. This often causes the outermost border of a processed image to be blank, because the algorithm has translated your image up and to the left by 0.5 pixels: coordinate 3 in the original system is now 3.5 in the new system and has fallen outside the bounds of computation.
This is actually a huge problem in 2D-to-3D image processing algorithms, as the projection between the two spaces is not exactly trivial, and tiny implementation differences cause noticeable problems. Notice how the pixel-indices coordinate system is 3x3, but the spatial coordinate system is essentially 4x4.
Try setting your spherize barrel to width+1/height+1 instead of width/height and see if that fills in your missing row.
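A sketch of that experiment against the call() method above - these are assumptions to test, not a verified fix:
// Hypothetical adjustment inside call(): center the distortion on pixel
// indices rather than on the spatial grid...
float centerX = (input.getWidth() - 1) / 2f;
float centerY = (input.getHeight() - 1) / 2f;
// ...and/or extend the processed range by one row/column as suggested above,
// clamping the sampling loop to the original image bounds so getPixel()
// is never called outside the bitmap.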
