Edit bitmap using AsyncTask and ProgressBar - Android

Sometimes when I edit a bitmap, my phone needs more time than usual to apply the changes. Can someone help me understand how to use an AsyncTask and a ProgressBar correctly?
For example in my application I do the following:
i.setOnClickListener(new OnClickListener() {
    @Override
    public void onClick(View v) {
        BitmapFactory.Options options = new BitmapFactory.Options();
        Bitmap bitmap = BitmapFactory.decodeFile(photoPath, options);
        bitmap = editing(bitmap, 60);
    }
});
Unfortunately, editing takes more time than it should:
public Bitmap editing(Bitmap src, int degree) {
    // get source image size
    int width = src.getWidth();
    int height = src.getHeight();
    int[] pix = new int[width * height];
    // get pixel array from source
    src.getPixels(pix, 0, width, 0, 0, width, height);
    int RY, GY, BY, RYY, GYY, BYY, R, G, B, Y;
    // PI, HALF_CIRCLE_DEGREE and RANGE are class constants
    // (3.14159, 180 and 256 in the equivalent TintThePicture code further down)
    double angle = (PI * (double) degree) / HALF_CIRCLE_DEGREE;
    int S = (int) (RANGE * Math.sin(angle));
    int C = (int) (RANGE * Math.cos(angle));
    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++) {
            int index = y * width + x;
            int r = (pix[index] >> 16) & 0xff;
            int g = (pix[index] >> 8) & 0xff;
            int b = pix[index] & 0xff;
            RY = ( 70 * r - 59 * g - 11 * b) / 100;
            GY = (-30 * r + 41 * g - 11 * b) / 100;
            BY = (-30 * r - 59 * g + 89 * b) / 100;
            Y  = ( 30 * r + 59 * g + 11 * b) / 100;
            RYY = ( S * BY + C * RY) / 256;
            BYY = ( C * BY - S * RY) / 256;
            GYY = (-51 * RYY - 19 * BYY) / 100;
            R = Y + RYY;
            R = (R < 0) ? 0 : ((R > 255) ? 255 : R);
            G = Y + GYY;
            G = (G < 0) ? 0 : ((G > 255) ? 255 : G);
            B = Y + BYY;
            B = (B < 0) ? 0 : ((B > 255) ? 255 : B);
            pix[index] = 0xff000000 | (R << 16) | (G << 8) | B;
        }
    // output bitmap
    Bitmap outBitmap = Bitmap.createBitmap(width, height, src.getConfig());
    outBitmap.setPixels(pix, 0, width, 0, 0, width, height);
    pix = null;
    return outBitmap;
}

private class MyTask extends AsyncTask<Void, Void, Bitmap> {
    private ProgressDialog progressDialog;

    @Override
    protected void onPreExecute() {
        super.onPreExecute();
        progressDialog = new ProgressDialog(MainActivity.this);
        progressDialog.setMessage("Loading...");
        progressDialog.setProgressStyle(ProgressDialog.STYLE_SPINNER);
        progressDialog.show();
    }

    @Override
    protected Bitmap doInBackground(Void... params) {
        BitmapFactory.Options options = new BitmapFactory.Options();
        Bitmap bitmap = BitmapFactory.decodeFile(photoPath, options);
        bitmap = editing(bitmap, 60);
        return bitmap;
    }

    @Override
    protected void onPostExecute(Bitmap result) {
        // you can use result here to show it on the UI
        progressDialog.dismiss();
    }
}
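With the task defined, the click listener only has to start it, and the heavy work leaves the UI thread. A minimal sketch (assuming i is the ImageView field from the question and MyTask is the class just shown):
i.setOnClickListener(new OnClickListener() {
    @Override
    public void onClick(View v) {
        // decoding and editing now run in doInBackground;
        // the ProgressDialog is shown in onPreExecute and dismissed in onPostExecute
        new MyTask().execute();
    }
});
Since onPostExecute runs on the UI thread, a call like i.setImageBitmap(result) is safe there.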

Related

YUV to RGB and CreateBitmap...which is the format?

I'm no expert on image formats. I'm testing the frame-rate performance of the camera.
When I convert the data from YUV to RGB, which RGB format does it end up in? RGB_565 or ARGB_8888?
And why does createBitmap take so long? Does it add information to the raw data?
This is the RGB code:
public int[] YUV_NV21_TO_RGB(byte[] yuv, int width, int height) {
    final int frameSize = width * height;
    int[] argb = new int[width * height];
    final int ii = 0;
    final int ij = 0;
    final int di = +1;
    final int dj = +1;
    int a = 0;
    for (int i = 0, ci = ii; i < height; ++i, ci += di) {
        for (int j = 0, cj = ij; j < width; ++j, cj += dj) {
            int y = (0xff & ((int) yuv[ci * width + cj]));
            int v = (0xff & ((int) yuv[frameSize + (ci >> 1) * width + (cj & ~1) + 0]));
            int u = (0xff & ((int) yuv[frameSize + (ci >> 1) * width + (cj & ~1) + 1]));
            y = y < 16 ? 16 : y;
            int a0 = 1192 * (y - 16);
            int a1 = 1634 * (v - 128);
            int a2 = 832 * (v - 128);
            int a3 = 400 * (u - 128);
            int a4 = 2066 * (u - 128);
            int r = (a0 + a1) >> 10;
            int g = (a0 - a2 - a3) >> 10;
            int b = (a0 + a4) >> 10;
            r = r < 0 ? 0 : (r > 255 ? 255 : r);
            g = g < 0 ? 0 : (g > 255 ? 255 : g);
            b = b < 0 ? 0 : (b > 255 ? 255 : b);
            argb[a++] = 0xff000000 | (r << 16) | (g << 8) | b;
        }
    }
    return argb;
}
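For reference, a minimal sketch of handing the returned array to createBitmap (the buffer and size names are assumptions standing in for the onPreviewFrame values):
int[] argb = YUV_NV21_TO_RGB(yuvData, previewWidth, previewHeight);
// the ints are packed ARGB, so ARGB_8888 matches them directly,
// while RGB_565 forces a down-conversion to 16-bit pixels
Bitmap bmp = Bitmap.createBitmap(argb, previewWidth, previewHeight, Bitmap.Config.ARGB_8888);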
The problem is that if I use createBitmap with the RGB_565 option, it is at least 10 ms faster than with ARGB_8888.
If RGB_565 is a sort of compression (loss of data), shouldn't it be the opposite (createBitmap with ARGB_8888 faster than RGB_565)?

Android bitmap image

I am new to Android development.
I would like to know how I can modify an image the user selects from the gallery.
Is it possible to do this by reading the image with a FileInputStream?
Or should I use a Bitmap?
Create a Canvas object from the respective Bitmap.
All editing can then be done on the Canvas instance and is applied directly to the underlying Bitmap object, as in the sketch below.
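A minimal sketch of that approach (the drawing calls are just placeholders):
// a Canvas needs a mutable bitmap, so copy the decoded one first
Bitmap mutable = original.copy(Bitmap.Config.ARGB_8888, true);
Canvas canvas = new Canvas(mutable);
Paint paint = new Paint();
paint.setColor(Color.RED);
// anything drawn here lands directly in the pixels of 'mutable'
canvas.drawCircle(50f, 50f, 25f, paint);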
Another option is the Aviary SDK, which also supports iOS and Windows Phone 7. Aviary provides most of the usual functions, such as orientation, crop, sharpness, red-eye, whiten, blemish, stickers, drawing, text, meme (beta), brightness, saturation, contrast, and custom options.
import android.graphics.Bitmap;

public class ProcessingImage {
    private Bitmap defaultImg;
    private int idBitmap;

    public int getIdBitmap() {
        return idBitmap;
    }

    public void setIdBitmap(int idBitmap) {
        this.idBitmap = idBitmap;
    }

    public Bitmap getDefaultImg() {
        return defaultImg;
    }

    public void setDefaultImg(Bitmap defaultImg) {
        this.defaultImg = defaultImg;
    }

    public ProcessingImage() {
    }

    public Bitmap processingI(Bitmap myBitmap) {
        return myBitmap;
    }

    public Bitmap TintThePicture(int deg, Bitmap defaultBitmap) {
        int w = defaultBitmap.getWidth();
        int h = defaultBitmap.getHeight();
        int[] pix = new int[w * h];
        defaultBitmap.getPixels(pix, 0, w, 0, 0, w, h);
        double angle = (3.14159d * (double) deg) / 180.0d;
        int S = (int) (256.0d * Math.sin(angle));
        int C = (int) (256.0d * Math.cos(angle));
        int r, g, b, index;
        int RY, BY, RYY, GYY, BYY, R, G, B, Y;
        for (int y = 0; y < h; y++) {
            for (int x = 0; x < w; x++) {
                index = y * w + x;
                r = (pix[index] >> 16) & 0xff;
                g = (pix[index] >> 8) & 0xff;
                b = pix[index] & 0xff;
                RY = (70 * r - 59 * g - 11 * b) / 100;
                BY = (-30 * r - 59 * g + 89 * b) / 100;
                Y = (30 * r + 59 * g + 11 * b) / 100;
                RYY = (S * BY + C * RY) / 256;
                BYY = (C * BY - S * RY) / 256;
                GYY = (-51 * RYY - 19 * BYY) / 100;
                R = Y + RYY;
                R = (R < 0) ? 0 : ((R > 255) ? 255 : R);
                G = Y + GYY;
                G = (G < 0) ? 0 : ((G > 255) ? 255 : G);
                B = Y + BYY;
                B = (B < 0) ? 0 : ((B > 255) ? 255 : B);
                pix[index] = 0xff000000 | (R << 16) | (G << 8) | B;
            }
        }
        Bitmap bm = Bitmap.createBitmap(w, h, defaultBitmap.getConfig());
        bm.setPixels(pix, 0, w, 0, 0, w, h);
        pix = null;
        return bm;
    }
}
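Usage is then simply (the bitmap name is assumed):
ProcessingImage processor = new ProcessingImage();
// a 60-degree tint, the same operation as editing(bitmap, 60) in the question
Bitmap tinted = processor.TintThePicture(60, myBitmap);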

Android JavaCV create IplImage from Camera to use with ColorHistogram

I am using JavaCV on Android.
In my code I have created an ImageComparator (the class from the OpenCV2 Cookbook examples,
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4) object and use it to compare images. If I use a file from the SD card, the comparator works:
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile,CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
But when I create an IplImage from the camera preview, it does not work. I get the following exception during the comparison "score" calculation:
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale:
int f = SUBSAMPLING_FACTOR;
if (grayImage == null || grayImage.width() != width / f
        || grayImage.height() != height / f) {
    grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}
int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
    int dataLine = y * dataStride;
    int imageLine = y * imageStride;
    for (int x = 0; x < imageWidth; x++) {
        imageBuffer.put(imageLine + x, data[dataLine + f * x]);
    }
}
How do I create a color IplImage from the camera to use with ImageComparator?
The code below seems to work fine:
public void onPreviewFrame(final byte[] data, final Camera camera) {
    try {
        Camera.Size size = camera.getParameters().getPreviewSize();
        processImage(data, size.width, size.height);
        camera.addCallbackBuffer(data);
    } catch (RuntimeException e) {
        // The camera has probably just been released, ignore.
        Log.d("Exception", " " + e);
    }
}

protected void processImage(byte[] data, int width, int height) {
    score.clear();
    // First, downsample our image
    int f = SUBSAMPLING_FACTOR;
    IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
    int[] _temp = new int[width * height];
    if (_4image != null) {
        decodeYUV420SP(_temp, data, width, height);
        _4image.getIntBuffer().put(_temp);
    }
    //bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    //bitmap.copyPixelsFromBuffer(_4image.getByteBuffer());
    Log.d("CompareAndroid", "processImage");
    int imageSize = _4image.width() * _4image.height();
    Iterator<ImageComparator> iterator = reference_List.iterator();
    // Compute histogram match and normalize by image size.
    // 1 means perfect match.
    while (iterator.hasNext()) {
        score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
    }
    Log.d("CompareImages", "Score Size " + score.size());
    postInvalidate();
}
This decoding code also seems to work fine:
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    int frameSize = width * height;
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0)
                y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            // note: this packs the channels in BGR order
            rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
        }
    }
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 2);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
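In the same untested spirit, the NV21 bytes from onPreviewFrame would be copied into yuvimage before the conversion (data being the preview byte[]):
// fill the source image with the raw NV21 buffer, then convert
yuvimage.getByteBuffer().put(data);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
// rgbimage is then a 3-channel BGR image, suitable for new ImageComparator(...)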

How to save video file using canvas on android?

Currently I'm working on video effects like thermal, mono, etc. For that I'm using the preview callback together with canvas drawing.
Can anyone please tell me how to save this video?
Below is my callback code:
mCamera.setPreviewCallback(new PreviewCallback() {
    public void onPreviewFrame(byte[] data1, Camera _camera) {
        Camera.Parameters parameters = _camera.getParameters();
        parameters.setColorEffect(Camera.Parameters.EFFECT_NONE);
        previewSize = parameters.getPreviewSize();
        int frameSize = previewSize.width * previewSize.height;
        int height = previewSize.height;
        int width = previewSize.width;
        int pixel;
        int A, R, G, B;
        final double GS_RED = 0.299;
        final double GS_GREEN = 0.587;
        final double GS_BLUE = 0.114;
        int[] rgba = new int[frameSize + 1];
        data = data1;
        for (int i = 0; i < height; i++)
            for (int j = 0; j < width; j++) {
                int y = (0xff & ((int) data[i * previewSize.width + j]));
                int u = (0xff & ((int) data[frameSize + (i >> 1) * previewSize.width + (j & ~1) + 0]));
                int v = (0xff & ((int) data[frameSize + (i >> 1) * previewSize.width + (j & ~1) + 1]));
                y = y < 16 ? 16 : y;
                int r = Math.round(1.164f * (y - 16) + 1.596f * (v - 128));
                int g = Math.round(1.164f * (y - 16) - 0.813f * (v - 128) - 0.391f * (u - 128));
                int b = Math.round(1.164f * (y - 16) + 2.018f * (u - 128));
                r = r < 0 ? 0 : (r > 255 ? 255 : r);
                g = g < 0 ? 0 : (g > 255 ? 255 : g);
                b = b < 0 ? 0 : (b > 255 ? 255 : b);
                rgba[i * previewSize.width + j] = 0xff000000 + (b << 16) + (g << 8) + r;
            }
        Bitmap bmp = Bitmap.createBitmap(width, height, Bitmap.Config.RGB_565);
        bmp.setPixels(rgba, 0, width, 0, 0, width, height);
        // scan through every single pixel
        for (int x = 0; x < width; ++x) {
            for (int y = 0; y < height; ++y) {
                // get one pixel color
                pixel = bmp.getPixel(x, y);
                // retrieve color of all channels
                A = Color.alpha(pixel);
                R = Color.red(pixel);
                G = Color.green(pixel);
                B = Color.blue(pixel);
                // take conversion up to one single value
                R = G = B = (int) (GS_RED * R + GS_GREEN * G + GS_BLUE * B);
                // set new pixel color to output bitmap
                bmp.setPixel(x, y, Color.argb(A, R, G, B));
            }
        }
        canvas = mHolder.lockCanvas();
        if (canvas != null) {
            canvas.drawBitmap(bmp, (canvas.getWidth() - width) / 4, (canvas.getHeight() - height) / 4, null);
            mHolder.unlockCanvasAndPost(canvas);
        }
        bmp.recycle();
    }
});
You can save each frame and then merge them with ffmpeg to encode them into a video file.
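A minimal sketch of the per-frame save (the numbered-file naming and the frameIndex counter are assumptions):
// save one processed frame as a numbered JPEG
void saveFrame(Bitmap bmp, int frameIndex) throws IOException {
    File out = new File(getExternalFilesDir(null), String.format("frame_%05d.jpg", frameIndex));
    FileOutputStream fos = new FileOutputStream(out);
    bmp.compress(Bitmap.CompressFormat.JPEG, 90, fos);
    fos.close();
}
The saved sequence can then be encoded with something like: ffmpeg -framerate 30 -i frame_%05d.jpg out.mp4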

Convert bitmap array to YUV (YCbCr NV21)

How can I convert a Bitmap returned by BitmapFactory.decodeFile() to the YUV format (similar to what the camera's onPreviewFrame() returns in a byte array)?
Here is some code that actually works:
// untested function
byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}

void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;
            // well known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
            // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
            // pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
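For example (the source bitmap and target size are assumptions):
// scale to the size the consumer expects, then convert;
// the NV21 buffer ends up width * height * 3 / 2 bytes long
Bitmap scaled = Bitmap.createScaledBitmap(srcBitmap, 640, 480, true);
byte[] nv21 = getNV21(640, 480, scaled);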
The following is code for converting a Bitmap to the YUV (NV21) format.
void yourFunction() {
    // mBitmap is your bitmap
    int mWidth = mBitmap.getWidth();
    int mHeight = mBitmap.getHeight();
    int[] mIntArray = new int[mWidth * mHeight];
    // Copy pixel data from the Bitmap into the 'mIntArray' array
    mBitmap.getPixels(mIntArray, 0, mWidth, 0, 0, mWidth, mHeight);
    // Call the encoding function: convert mIntArray to YUV binary data
    byte[] yuv = new byte[mWidth * mHeight * 3 / 2];
    encodeYUV420SP(yuv, mIntArray, mWidth, mHeight);
}

static public void encodeYUV420SP(byte[] yuv420sp, int[] rgba, int width, int height) {
    final int frameSize = width * height;
    int[] U = new int[frameSize];
    int[] V = new int[frameSize];
    int r, g, b, y, u, v;
    for (int j = 0; j < height; j++) {
        int index = width * j;
        for (int i = 0; i < width; i++) {
            r = Color.red(rgba[index]);
            g = Color.green(rgba[index]);
            b = Color.blue(rgba[index]);
            // rgb to yuv (note the parentheses: '>>' binds more weakly than '+')
            y = ((66 * r + 129 * g + 25 * b + 128) >> 8) + 16;
            u = ((-38 * r - 74 * g + 112 * b + 128) >> 8) + 128;
            v = ((112 * r - 94 * g - 18 * b + 128) >> 8) + 128;
            // clip y
            yuv420sp[index] = (byte) ((y < 0) ? 0 : ((y > 255) ? 255 : y));
            U[index] = u;
            V[index++] = v;
        }
    }
    // interleave V then U after the Y plane (NV21 order), subsampled 2x2
    int uvIndex = frameSize;
    for (int j = 0; j < height; j += 2) {
        for (int i = 0; i < width; i += 2) {
            int vv = V[width * j + i];
            int uu = U[width * j + i];
            yuv420sp[uvIndex++] = (byte) ((vv < 0) ? 0 : ((vv > 255) ? 255 : vv));
            yuv420sp[uvIndex++] = (byte) ((uu < 0) ? 0 : ((uu > 255) ? 255 : uu));
        }
    }
}
If using Java to convert a Bitmap to a YUV byte[] is too slow for you, you can try libyuv from Google.
With the OpenCV library you can replace the encodeYUV420SP Java function with a single native OpenCV line, and it is roughly 4x faster:
Mat mFrame = Mat(height,width,CV_8UC4,pFrameData).clone();
Complete example:
Java side:
Bitmap bitmap = mTextureView.getBitmap(mWidth, mHeight);
int[] argb = new int[mWidth * mHeight];
// get the ARGB pixels, then process them with the 8UC4 OpenCV conversion
bitmap.getPixels(argb, 0, mWidth, 0, 0, mWidth, mHeight);
// native method (NDK or CMake)
processFrame8UC4(argb, mWidth, mHeight);
Native side (NDK):
JNIEXPORT jint JNICALL Java_com_native_detector_Utils_processFrame8UC4
        (JNIEnv *env, jobject object, jintArray frame, jint width, jint height) {
    jint *pFrameData = env->GetIntArrayElements(frame, 0);
    // this is the line:
    Mat mFrame = Mat(height, width, CV_8UC4, pFrameData).clone();
    // the rest is just an extra example of a grayscale conversion
    // (CV_RGBA2GRAY, since the Mat has four channels):
    Mat mout;
    cvtColor(mFrame, mout, CV_RGBA2GRAY);
    int objects = face_detection(env, mout);
    env->ReleaseIntArrayElements(frame, pFrameData, 0);
    return objects;
}
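On the Java side the method then has to be declared as native and the library loaded; a sketch (the library name is an assumption and must match your NDK/CMake target):
static {
    System.loadLibrary("native-detector"); // assumed target name
}
private static native int processFrame8UC4(int[] frame, int width, int height);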
The bmp file will be in RGB888 format, so you will need to convert it to YUV.
I have not come across any API in Android that will do this for you.
But you can do it yourself; see this link on how to.
First you calculate the RGB data:
r = (p >> 16) & 0xff;
g = (p >> 8) & 0xff;
b = p & 0xff;
y = 0.2126f * r + 0.7152f * g + 0.0722f * b;
u = -0.09991f * r - 0.33609f * g + 0.436f * b;
v = 0.615f * r - 0.55861f * g - 0.05639f * b;
y, u and v are the components of the YUV matrix; the weights here are the BT.709 coefficients.
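Wrapped up as a small Java helper (a sketch; p is one packed ARGB pixel):
static float[] argbPixelToYuv(int p) {
    int r = (p >> 16) & 0xff;
    int g = (p >> 8) & 0xff;
    int b = p & 0xff;
    // BT.709 weights, consistent with the u and v coefficients above
    float y = 0.2126f * r + 0.7152f * g + 0.0722f * b;
    float u = -0.09991f * r - 0.33609f * g + 0.436f * b;
    float v = 0.615f * r - 0.55861f * g - 0.05639f * b;
    return new float[] { y, u, v };
}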
