Android: Low image quality / altered image when using glReadPixels

I've spent a lot of time trying to figure this out but can't see what I am doing wrong.
This is my original image
Image recaptured 5 times
Recapturing the image multiple times clearly shows that something is not right. Capturing it once is just about OK, but twice is enough to clearly see the difference.
I found these similar issues on stackoverflow:
Bitmap quality using glReadPixels with frame buffer objects
Extract pixels from TextureSurface using glReadPixels resulting in bad image Bitmap
(sorry, I'm limited in how many links I can add)
Unfortunately none of the proposed suggestions/solutions fixed my issue.
This is my code:
private Bitmap createSnapshot(int x, int y, int w, int h) {
    int bitmapBuffer[] = new int[w * h];
    int bitmapSource[] = new int[w * h];
    IntBuffer intBuffer = IntBuffer.wrap(bitmapBuffer);
    intBuffer.position(0);
    try {
        glReadPixels(x, y, w, h, GL_RGBA, GL_UNSIGNED_BYTE, intBuffer);
        int offset1, offset2;
        for (int i = 0; i < h; i++) {
            // Flip vertically: OpenGL rows are bottom-up, Bitmap rows are top-down.
            offset1 = i * w;
            offset2 = (h - i - 1) * w;
            for (int j = 0; j < w; j++) {
                // Swap the red and blue channels (ABGR -> ARGB).
                int texturePixel = bitmapBuffer[offset1 + j];
                int blue = (texturePixel >> 16) & 0xff;
                int red = (texturePixel << 16) & 0x00ff0000;
                int pixel = (texturePixel & 0xff00ff00) | red | blue;
                bitmapSource[offset2 + j] = pixel;
            }
        }
    } catch (GLException e) {
        return null;
    }
    return Bitmap.createBitmap(bitmapSource, w, h, Bitmap.Config.ARGB_8888);
}
I'm using OpenGL ES 2. For bitmap compression I am using PNG. I also tested JPEG (quality 100) and the result is similar, just slightly worse.
There is also a slight yellowish tint added to the image.
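For reference, the save step described here is essentially just Bitmap.compress; a minimal sketch (the file handling is an assumption, not code from the project):

private void saveSnapshot(Bitmap snapshot, File file) throws IOException {
    try (FileOutputStream out = new FileOutputStream(file)) {
        // PNG is lossless, so the quality argument is ignored;
        // the JPEG test would use CompressFormat.JPEG with quality 100.
        snapshot.compress(Bitmap.CompressFormat.PNG, 100, out);
    }
}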

Related

Android RGB to YCbCr Conversion and output to imageView

I am doing image processing which requires converting an RGB bitmap image to the YCbCr color space. I retrieve the RGB value of each pixel and apply the conversion matrix to it.
public void convertRGB(View v) {
    if (imageLoaded) {
        int width = inputBM.getWidth();
        int height = inputBM.getHeight();
        int pixel;
        int alpha, red, green, blue;
        int Y, Cb, Cr;
        outputBM = Bitmap.createBitmap(width, height, inputBM.getConfig());
        for (int x = 0; x < width; x++) {
            for (int y = 0; y < height; y++) {
                pixel = inputBM.getPixel(x, y);
                alpha = Color.alpha(pixel);
                red = Color.red(pixel);
                green = Color.green(pixel);
                blue = Color.blue(pixel);
                Y = (int) (0.299 * red + 0.587 * green + 0.114 * blue);
                Cb = (int) (128 - 0.169 * red - 0.331 * green + 0.500 * blue);
                Cr = (int) (128 + 0.500 * red - 0.419 * green - 0.081 * blue);
                int p = (Y << 24) | (Cb << 16) | (Cr << 8);
                outputBM.setPixel(x, y, p);
            }
        }
        comImgView.setImageBitmap(outputBM);
    }
}
The problem is that the output color is different from the original. I tried to use BufferedImage, but it does not work on Android.
Original:
After Conversion:
May I know the correct way to handle a YCbCr image in Android Java?
Try using the code below:
ByteArrayOutputStream out = new ByteArrayOutputStream();
YuvImage yuvImage = new YuvImage(your_yuv_data, ImageFormat.NV21, width, height, null);
yuvImage.compressToJpeg(new Rect(0, 0, width, height), 50, out);
byte[] imageBytes = out.toByteArray();
Bitmap image = BitmapFactory.decodeByteArray(imageBytes, 0, imageBytes.length);
iv.setImageBitmap(image);
Check the documentation for a detailed description of the YuvImage class.
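Note that Android's Bitmap has no YCbCr config: the int packed in the question, (Y << 24) | (Cb << 16) | (Cr << 8), is interpreted as alpha = Y, red = Cb, green = Cr, blue = 0 when displayed, which is exactly the kind of color shift shown. If the goal is just to visualize a converted channel, one option (my suggestion, not part of the original answer) is to map it back into RGB, for example replacing the two packing lines in the question's loop to render luma as grayscale:

// Render the luma (Y) channel as grayscale: with R = G = B = Y the
// ARGB pixel displays as a gray level proportional to luminance.
int p = Color.argb(alpha, Y, Y, Y);
outputBM.setPixel(x, y, p);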

Android TensorFlow - prevent resize Image

I am working on TensorFlow image stylization. The problem I am facing is that it resizes my actual image. I want to apply the style to the whole image itself. For example, if my image resolution is 1280x960, it should stay the same after I apply the style.
I am not using the default INPUT_SIZE value of 256; with the default value it works fine. Here is the code I am using to prevent resizing the image:
private TensorFlowInferenceInterface inferenceInterface;

private void applyStyle() {
    inferenceInterface = new TensorFlowInferenceInterface(mActivity.getAssets(), "bossK_float.pb");
    Bitmap bitmap = getBitmapFromPath();
    bitmap = Bitmap.createBitmap(bitmap, 0, 0, bitmap.getWidth(), bitmap.getHeight(), matrix, true);
    INPUT_SIZE_WIDTH = bitmap.getWidth();
    INPUT_SIZE_HEIGHT = bitmap.getHeight();
    mStyledBitmap = stylizeImage(bitmap);
}
private Bitmap stylizeImage(Bitmap bitmap) {
    Bitmap scaledBitmap = scaleBitmap(bitmap, INPUT_SIZE_WIDTH, INPUT_SIZE_HEIGHT);
    intValues = new int[INPUT_SIZE_WIDTH * INPUT_SIZE_HEIGHT];
    floatValues = new float[INPUT_SIZE_WIDTH * INPUT_SIZE_HEIGHT * 3];
    scaledBitmap.getPixels(intValues, 0, scaledBitmap.getWidth(), 0, 0, scaledBitmap.getWidth(), scaledBitmap.getHeight());
    scaledBitmap = scaledBitmap.copy(Bitmap.Config.ARGB_8888, true);
    for (int i = 0; i < intValues.length; ++i) {
        final int val = intValues[i];
        floatValues[i * 3 + 0] = ((val >> 16) & 0xFF) * 1.0f;
        floatValues[i * 3 + 1] = ((val >> 8) & 0xFF) * 1.0f;
        floatValues[i * 3 + 2] = (val & 0xFF) * 1.0f;
    }
    Trace.beginSection("feed");
    inferenceInterface.feed(INPUT_NAME, floatValues, INPUT_SIZE_WIDTH, INPUT_SIZE_HEIGHT, 3);
    Trace.endSection();
    Trace.beginSection("run");
    inferenceInterface.run(new String[]{OUTPUT_NAME});
    Trace.endSection();
    Trace.beginSection("fetch");
    inferenceInterface.fetch(OUTPUT_NAME, floatValues);
    Trace.endSection();
    for (int i = 0; i < intValues.length; ++i) {
        intValues[i] =
                0xFF000000
                        | (((int) (floatValues[i * 3 + 0])) << 16)
                        | (((int) (floatValues[i * 3 + 1])) << 8)
                        | ((int) (floatValues[i * 3 + 2]));
    }
    scaledBitmap.setPixels(intValues, 0, scaledBitmap.getWidth(), 0, 0, scaledBitmap.getWidth(), scaledBitmap.getHeight());
    return scaledBitmap;
}
private Bitmap scaleBitmap(Bitmap origin, int newWidth, int newHeight) {
    if (origin == null) {
        return null;
    }
    int height = origin.getHeight();
    int width = origin.getWidth();
    float scaleWidth = ((float) newWidth) / width;
    float scaleHeight = ((float) newHeight) / height;
    Matrix matrix = new Matrix();
    matrix.postScale(scaleWidth, scaleHeight);
    Bitmap newBitmap = Bitmap.createBitmap(origin, 0, 0, width, height, matrix, false);
    return newBitmap;
}
When I change my INPUT_SIZE value to INPUT_SIZE_WIDTH and INPUT_SIZE_HEIGHT, my application stops without an error message. When I debug this code, it gets stuck on this piece of code and stops my app:
Trace.beginSection("run");
inferenceInterface.run(new String[]{OUTPUT_NAME});
Trace.endSection();
Please let me know how I can style the whole image using TensorFlow.
Thank You!
Your code stops there because of the difference in sizes; you are probably getting an ArrayIndexOutOfBoundsException.
The model is trained to accept images of a particular size, so whenever you classify, the image has to be reduced to that size.
Even your training data, when creating a pb/lite/tflite file, is converted to the same image size you specify at model creation. The results will not be affected to a large extent. You can give that a try.
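In other words, a common workaround is to run the network at the fixed size it was trained for and scale the stylized result back up afterwards. A minimal sketch along those lines, reusing the question's stylizeImage helper (INPUT_SIZE is assumed to be the size the model was trained with, e.g. 256):

private Bitmap stylizeKeepingResolution(Bitmap original) {
    int originalWidth = original.getWidth();
    int originalHeight = original.getHeight();
    // Run the network at the fixed size it was trained for...
    INPUT_SIZE_WIDTH = INPUT_SIZE;
    INPUT_SIZE_HEIGHT = INPUT_SIZE;
    Bitmap styled = stylizeImage(original); // stylizeImage scales down internally
    // ...then scale the stylized result back to the original resolution.
    return Bitmap.createScaledBitmap(styled, originalWidth, originalHeight, true);
}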

Android GLSurfaceView Saving bitmap causes exception

I've modified this project, which uses GLSurfaceView and Effects to show a ViewPager with some of the effects applied to an image.
Additionally, I created an overlay bitmap that is put over every image after the effect has been applied.
Up to this point, the app is working fine. But now I have to save the displayed image to a file when a button is pressed.
So I used this code:
private Bitmap createBitmapFromGLSurface(int x, int y, int w, int h, GL10 gl)
        throws OutOfMemoryError {
    int bitmapBuffer[] = new int[w * h];
    int bitmapSource[] = new int[w * h];
    IntBuffer intBuffer = IntBuffer.wrap(bitmapBuffer);
    intBuffer.position(0);
    try {
        gl.glReadPixels(x, y, w, h, GL10.GL_RGBA, GL10.GL_UNSIGNED_BYTE, intBuffer);
        int offset1, offset2;
        for (int i = 0; i < h; i++) {
            offset1 = i * w;
            offset2 = (h - i - 1) * w;
            for (int j = 0; j < w; j++) {
                int texturePixel = bitmapBuffer[offset1 + j];
                int blue = (texturePixel >> 16) & 0xff;
                int red = (texturePixel << 16) & 0x00ff0000;
                int pixel = (texturePixel & 0xff00ff00) | red | blue;
                bitmapSource[offset2 + j] = pixel;
            }
        }
    } catch (GLException e) {
        e.printStackTrace();
        return null;
    }
    return Bitmap.createBitmap(bitmapSource, w, h, Bitmap.Config.ARGB_8888);
}
to obtain a Bitmap. When the button is pressed, I call this method:
protected void onClick() {
    read = true;
    mEffectView.requestRender();
}
This forces the rendering, so I generate the bitmap and save it to a file using an AsyncTask.
read is used as a semaphore in onDrawFrame(GL10 gl) so that the bitmap is only generated when I want to save it.
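A sketch of that semaphore pattern in the renderer (illustrative; surfaceWidth, surfaceHeight and SaveBitmapTask are assumed names, not code from the project):

private volatile boolean read = false;

@Override
public void onDrawFrame(GL10 gl) {
    // ... apply the effects and draw the frame as usual ...
    if (read) {
        read = false;
        Bitmap snapshot = createBitmapFromGLSurface(0, 0, surfaceWidth, surfaceHeight, gl);
        if (snapshot != null) {
            new SaveBitmapTask().execute(snapshot); // AsyncTask that writes the file
        }
    }
}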
Saving one image works fine. When I save a second one and then change page, this error comes up:
A/Bitmap: Failed to acquire strong reference to pixels
A/libc: Fatal signal 6 (SIGABRT), code -6 in tid 20475 (GLThread 9540)
Another issue is that the overlay, though it is displayed, is not saved in the image.
This is how I apply it:
Generation
EffectFactory effectFactory = mEffectContext.getFactory();
overlayEffect = effectFactory.createEffect(EffectFactory.EFFECT_BITMAPOVERLAY);
overlayEffect.setParameter("bitmap", overlay);
Applying the effects
mEffect.apply(mTextures[0], mImageWidth, mImageHeight, mTextures[1]);
overlayEffect.apply(mTextures[1], mImageWidth, mImageHeight, mTextures[2]);
mEffect is the only effect visible in the saved image.
What am I doing wrong?
EDIT
I solved the last problem: I found out that you have to release and recreate every Effect object you are using every time mEffectView.requestRender() is called.
Apparently, when using
overlayEffect = effectFactory.createEffect(EffectFactory.EFFECT_BITMAPOVERLAY);
overlayEffect.setParameter("bitmap", overlay);
the passed bitmap is recycled!
So I solved the issue by passing a copy of it:
overlayEffect = effectFactory.createEffect(EffectFactory.EFFECT_BITMAPOVERLAY);
overlayEffect.setParameter("bitmap", overlay.copy(overlay.getConfig(), false));
Hope this will help somebody else!
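For completeness, the release-and-recreate pattern described above might look roughly like this per render pass (a sketch; the surrounding method structure is assumed, the names follow the question's code):

// Rebuild the overlay effect on every render instead of reusing a stale instance.
if (overlayEffect != null) {
    overlayEffect.release();
}
EffectFactory effectFactory = mEffectContext.getFactory();
overlayEffect = effectFactory.createEffect(EffectFactory.EFFECT_BITMAPOVERLAY);
// Pass a copy, because the effect recycles the bitmap it is given.
overlayEffect.setParameter("bitmap", overlay.copy(overlay.getConfig(), false));
overlayEffect.apply(mTextures[1], mImageWidth, mImageHeight, mTextures[2]);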

Android camera write text directly into frame

I have the camera (the "deprecated" API) and a camera PreviewCallback in which I get frames. I use that to take pictures (not takePicture or PictureCallback, because I want speed over quality) and save them as JPEG; code snippet below:
@Override
public synchronized void onPreviewFrame(byte[] frame, Camera camera) {
    YuvImage yuv = new YuvImage(frame, previewFormat, width, height, null);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    yuv.compressToJpeg(new Rect(0, 0, width, height), 50, out);
    out.toByteArray(); // this is my JPEG
    // ... save this JPEG
}
I want to draw text, a timestamp, into the frame. I know how to do this by converting to a Bitmap, using a Canvas, drawing the text, and converting back. But that method is relatively slow (and it's not even the biggest issue): I also need the frame byte array for other things, i.e. putting it into a video, so I would like to know if there is some method or library to write text directly into that frame. I don't use the preview (or rather, I use a dummy preview); I'm asking how to change the frame byte array itself. I bet I'd need to do some mumble-jumble in the byte array to make it work. The frame is in a standard(?) format (YUV420/NV21).
Edit:
I managed to get a working function (getNV21). Of course it is far from efficient, as it creates a new Bitmap every frame and draws text to it, but at least I have something I can work on that writes directly into the YUV image. Still, answers would be appreciated.
private Bitmap drawText(String text, int rotation) {
    Bitmap bitmap = Bitmap.createBitmap(maxWidth, maxHeight, Bitmap.Config.ARGB_8888);
    int width = bitmap.getWidth();
    int height = bitmap.getHeight();
    Canvas canvas = new Canvas(bitmap);
    Paint paint = new Paint(Paint.ANTI_ALIAS_FLAG);
    paint.setColor(Color.rgb(255, 255, 255));
    paint.setStrokeWidth(height / 36);
    paint.setTextSize(height / 36);
    paint.setShadowLayer(5f, 0f, 0f, Color.BLACK);
    paint.setTypeface(Typeface.MONOSPACE);
    if (rotation == 0 || rotation == 180) {
        canvas.rotate(rotation, width / 2, height / 2);
    } else {
        canvas.translate(Math.abs(width - height), 0);
        int w = width / 2 - Math.abs(width / 2 - height / 2);
        int h = height / 2;
        canvas.rotate(-rotation, w, h);
    }
    canvas.drawText(text, 10, height - 10, paint);
    return bitmap;
}
byte[] getNV21(byte[] yuv, int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}
private static void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++, index++, yIndex++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;
            // Skip black pixels so the original frame shows through under the text.
            if (R == 0 && G == 0 && B == 0) {
                if (j % 2 == 0 && index % 2 == 0) uvIndex += 2;
                continue;
            }
            // well known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2,
            // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
            // pixel AND every other scanline.
            yuv420sp[yIndex] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
        }
    }
}
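Putting the pieces together, the per-frame flow might then look like this (a sketch; timestampText and rotation are assumed to come from elsewhere in the class, and the text bitmap is assumed to match the preview size):

@Override
public synchronized void onPreviewFrame(byte[] frame, Camera camera) {
    // Render the timestamp into an ARGB bitmap, then burn it into the NV21 frame.
    Bitmap textOverlay = drawText(timestampText, rotation);
    byte[] stamped = getNV21(frame, width, height, textOverlay);
    // The stamped array can now be compressed to JPEG or fed into a video encoder.
    YuvImage yuv = new YuvImage(stamped, ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    yuv.compressToJpeg(new Rect(0, 0, width, height), 50, out);
}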

Android SetPixels() Explanation and Example?

I am trying to set a region of pixels in a mutable bitmap to a different color in my Android app. Unfortunately I cannot get setPixels() to work properly; I am constantly getting ArrayIndexOutOfBoundsExceptions. I think it may have something to do with the stride, but I'm really not sure. That is the only parameter I still don't understand. The only other post I have seen on setPixels (not setPixel) is here: drawBitmap() and setPixels(): what's the stride? and it did not help me. I tried setting the stride to 0, to the width of the bitmap, and to the width of the bitmap minus the area I'm trying to draw, and it still crashes. Here is my code:
public void updateBitmap(byte[] buf, int offset, int x, int y, int width, int height) {
    // transform byte[] to int[]
    IntBuffer intBuf = ByteBuffer.wrap(buf).asIntBuffer();
    int[] intarray = new int[intBuf.remaining()];
    intBuf.get(intarray);
    int stride = ??????
    screenBitmap.setPixels(intarray, offset, stride, x, y, width, height); // crash here
}
My bitmap is mutable, so I know that is not the problem. I am also certain that my byte array is being properly converted to an integer array. But I keep getting ArrayIndexOutOfBoundsExceptions and I don't understand why. Please help me figure this out.
EDIT -
here is how I construct the fake input:
int width = 1300;
int height = 700;
byte[] buf = new byte[width * height * 4 * 4]; // adding another * 4 here seems to work... why?
for (int i = 0; i < width * height * 4 * 4; i += 4) {
    buf[i] = (byte) 255;
    buf[i + 1] = 3;
    buf[i + 2] = (byte) 255;
    buf[i + 3] = 3;
}
// (byte[] buf, int offset, int x, int y, int width, int height) - for reference
siv.updateBitmap(buf, 0, 0, 0, width, height);
So the width and height are the correct number of ints (at least they should be).
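The factor of 4 is the byte-to-int conversion: ByteBuffer.asIntBuffer() packs every 4 bytes into one int. A quick size check (my arithmetic, not from the original post):

// setPixels(intarray, 0, stride, 0, 0, width, height) with stride = width
// reads width * height = 1300 * 700 = 910,000 ints.
// A byte[] of width * height * 4 bytes yields exactly 910,000 ints, so the
// extra * 4 (3,640,000 ints) gives four times the headroom, which can mask
// a stride-related overrun rather than fix it.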
EDIT2 - here is the code for the original creation of screenBitmap:
public Bitmap createABitmap() {
    int w = 1366;
    int h = 766;
    byte[] buf = new byte[h * w * 4];
    for (int i = 0; i < h * w * 4; i += 4) {
        buf[i] = (byte) 255;
        buf[i + 1] = (byte) 255;
        buf[i + 2] = 0;
        buf[i + 3] = 0;
    }
    DisplayMetrics metrics = new DisplayMetrics();
    getWindowManager().getDefaultDisplay().getMetrics(metrics);
    IntBuffer intBuf = ByteBuffer.wrap(buf).asIntBuffer();
    int[] intarray = new int[intBuf.remaining()];
    intBuf.get(intarray);
    Bitmap bmp = Bitmap.createBitmap(metrics, w, h, Bitmap.Config.valueOf("ARGB_8888"));
    bmp.setPixels(intarray, 0, w, 0, 0, w, h);
    return bmp;
}
It seems to work in this instance, not sure what the difference is
Short
I think stride is the width of the image contained in pixels.
Long
From what I take from the documentation, the algorithm should work somewhat like this:
public void setPixels(int[] pixels,
                      int offset,
                      int stride,
                      int x,
                      int y,
                      int width,
                      int height) {
    for (int rowIndex = 0; rowIndex < height; rowIndex++) {
        int rowStart = offset + stride * rowIndex; // row position in pixels
        int bitmapY = y + rowIndex;                // y position in the bitmap
        for (int columnIndex = 0; columnIndex < width; columnIndex++) {
            int bitmapX = x + columnIndex;            // x position in the bitmap
            int pixelsIndex = rowStart + columnIndex; // position in pixels
            setPixel(bitmapX, bitmapY, pixels[pixelsIndex]);
        }
    }
}
At least this is what I would do with these arguments, because it allows you to have one pixels array as the source and cut out different images in different sizes.
Feel free to correct the algorithm if I am wrong.
So, say you have an image with pixelsWidth and pixelsHeight in pixels.
Then you want to copy a section at pixelsX and pixelsY with width and height to a bitmap at position bitmapX and bitmapY.
This should be the call:
bitmap.setPixels(
        pixels,                          // the source pixel array
        pixelsWidth * pixelsY + pixelsX, // offset
        pixelsWidth,                     // stride
        bitmapX,                         // x
        bitmapY,                         // y
        width,
        height);
Probably it should be:
screenBitmap.setPixels(intarray, 0, width / 4, x, y, width / 4, height);
because you have converted bytes to ints. Your error is an ArrayIndexOutOfBoundsException; check whether intBuf.remaining() == width * height / 4.
If you're trying to draw on a bitmap, your best approach would be to abandon this method and use a canvas instead:
Canvas canvas = new Canvas(screenBitmap);
You can then draw specific points (if you want to draw a pixel), or other shapes like rectangles, circles, etc:
canvas.drawPoint(x, y, paint);
etc
Hope this helps.
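For instance, filling the same rectangular region that setPixels was targeting could look like this (a sketch; the Paint setup is an assumption):

Paint paint = new Paint();
paint.setColor(Color.RED);
paint.setStyle(Paint.Style.FILL);
// Fills the region (x, y) .. (x + width, y + height) directly on screenBitmap.
canvas.drawRect(x, y, x + width, y + height, paint);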
