Photoshop has a number of blending modes, such as:
Color Burn
Multiply
Darken
Lighten
Overlay
(more information here: http://help.adobe.com/en_US/photoshop/cs/using/WSfd1234e1c4b69f30ea53e41001031ab64-77eba.html)
For example, the Color Burn mode "looks at the color information in each channel and darkens the base color to reflect the blend color by increasing the contrast between the two. Blending with white produces no change".
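In other words (this is just my own sketch of the per-channel math, not a formula taken from the Adobe documentation), Color Burn on 8-bit channel values behaves roughly like this:
// Rough per-channel Color Burn (sketch): invert the base, scale by the blend, invert back.
static int colorBurn(int base, int blend) {
    if (blend == 0) {
        return 0; // burning with black gives black
    }
    return Math.max(0, 255 - ((255 - base) * 255) / blend); // blending with white returns the base unchanged
}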
This is my code:
Bitmap blendBitmap = BitmapFactory.decodeStream(ctx.getAssets().open(filename));
Canvas canvas = new Canvas(srcBitmap);
canvas.drawBitmap(blendBitmap, 0, 0, null); // ?
p.recycle();
p = null;
Is it possible to apply, for example, the Color Burn blending mode instead of simply drawing one image on top of the other (as in this small snippet)?
It was not difficult.
I used the NDK (for performance) to manipulate the pixels directly. This information on blending modes was very useful: How does photoshop blend two images together?
My final solution is:
#define max(A, B) ((A) > (B) ? (A) : (B)) // helper, in case max() is not already defined
#define ChannelBlend_ColorBurn(A, B) ((uint8_t) ((B == 0) ? B : max(0, 255 - (((255 - A) << 8) / B))))
#define ChannelBlend_Alpha(A, B, O) ((uint8_t) (O * A + (1 - O) * B))
#define ChannelBlend_AlphaF(A, B, F, O) (ChannelBlend_Alpha(F(A, B), A, O))
typedef struct {
    uint8_t red;
    uint8_t green;
    uint8_t blue;
    uint8_t alpha;
} rgba;

// Blend
JNIEXPORT void
JNICALL Java_com_package_Filter_jniBlend(JNIEnv* env, jobject obj, jobject bitmapA,
        jobject bitmapB, jobject bitmapOut, jint mode) {
    // Properties
    AndroidBitmapInfo infoA;
    void* pixelsA;
    AndroidBitmapInfo infoB;
    void* pixelsB;
    AndroidBitmapInfo infoOut;
    void* pixelsOut;
    int ret;

    // Get image info
    if ((ret = AndroidBitmap_getInfo(env, bitmapA, &infoA)) < 0 ||
            (ret = AndroidBitmap_getInfo(env, bitmapB, &infoB)) < 0 ||
            (ret = AndroidBitmap_getInfo(env, bitmapOut, &infoOut)) < 0) {
        return;
    }

    // Check image
    if (infoA.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
            infoB.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
            infoOut.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        return;
    }

    // Lock all images
    if ((ret = AndroidBitmap_lockPixels(env, bitmapA, &pixelsA)) < 0 ||
            (ret = AndroidBitmap_lockPixels(env, bitmapB, &pixelsB)) < 0 ||
            (ret = AndroidBitmap_lockPixels(env, bitmapOut, &pixelsOut)) < 0) {
        LOGE("Error! %d", ret);
    }

    int h = infoA.height;
    int w = infoA.width;
    int wh = w * h;
    int n;
    rgba* inputA = (rgba*) pixelsA;
    rgba* inputB = (rgba*) pixelsB;
    rgba* output = (rgba*) pixelsOut;
    rgba pA, pB;
    int x, y;
    for (y = 0; y < h; y++) {
        for (x = 0; x < w; x++) {
            n = y * w + x;
            pA = inputA[n];
            pB = inputB[n];
            float alpha = (float) pB.alpha / 255.0;
            output[n].red = ChannelBlend_AlphaF(pA.red, pB.red, ChannelBlend_ColorBurn, alpha);
            output[n].green = ChannelBlend_AlphaF(pA.green, pB.green, ChannelBlend_ColorBurn, alpha);
            output[n].blue = ChannelBlend_AlphaF(pA.blue, pB.blue, ChannelBlend_ColorBurn, alpha);
        }
    }

    // Unlocks everything
    AndroidBitmap_unlockPixels(env, bitmapA);
    AndroidBitmap_unlockPixels(env, bitmapB);
    AndroidBitmap_unlockPixels(env, bitmapOut);
}
A small remark on performance: once the blend has been computed for a given pair of channel values, it is worth storing the result in some kind of cache (for example a lookup table) so it can be reused instead of recomputed the next time.
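For illustration, a minimal sketch of that caching idea (shown here in Java; the same 256x256 table can just as well be built once in the native code). The class name is mine:
// Hypothetical lookup table: precompute Color Burn for every (base, blend) pair once,
// so blending a channel later is a single array read instead of a divide.
final class ColorBurnLut {
    private final int[][] table = new int[256][256];

    ColorBurnLut() {
        for (int base = 0; base < 256; base++) {
            for (int blend = 0; blend < 256; blend++) {
                table[base][blend] = (blend == 0)
                        ? 0
                        : Math.max(0, 255 - ((255 - base) * 255) / blend);
            }
        }
    }

    int lookup(int base, int blend) {
        return table[base][blend];
    }
}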
Related
I'm trying to write a YUV420P to RGB888 converter for the case where I have the entire frame as one giant buffer: Y (of size width*height), then Cr (of size width*height/4), then Cb (of size width*height/4). The output should be an RGB buffer of size width*height*3.
I think my function below is very inefficient. For example, I use the ceiling function (shouldn't it return an int? In my case it returns a double, why?), and I've never seen any color conversion function use it. But this is the way I found to get the corresponding Cr and Cb for each Y.
JNIEXPORT void JNICALL Java_com_example_mediacodecdecoderexample_YuvToRgb_YUVtoRBGA2(JNIEnv * env, jobject obj, jbyteArray yuv420sp, jint width, jint height, jbyteArray rgbOut)
{
//ITU-R BT.601 conversion
//
// R = 1.164*(Y-16)+1.596*(Cr-128)
// G = 1.164*(Y-16)-0.392*(Cb-128)-0.813*(Cr-128)
// B = 1.164*(Y-16)+2.017*(Cb-128)
//
int Y;
int Cr;
int Cb;
int R;
int G;
int B;
int size = width * height;
//After width*height luminance values we have the Cr values
size_t CrBase = size;
//After width*height luminance values + width*height/4 we have the Cb values
size_t CbBase = size + width*height/4;
jbyte *rgbData = (jbyte*) ((*env)->GetPrimitiveArrayCritical(env, rgbOut, 0));
jbyte* yuv = (jbyte*) (*env)->GetPrimitiveArrayCritical(env, yuv420sp, 0);
for (int i=0; i<size; i++) {
Y = rgbData[i] - 16;
Cr = rgbData[CrBase + ceil(i/4)] - 128;
Cb = rgbData[CbBase + ceil(i/4)] - 128;
R = 1.164*Y+1.596*Cr;
G = 1.164*Y-0.392*Cb-0.813*Cr;
B = 1.164*Y+2.017*Cb;
yuv[i*3] = R;
yuv[i*3+1] = G;
yuv[i*3+2] = B;
}
(*env)->ReleasePrimitiveArrayCritical(env, rgbOut, rgbData, 0);
(*env)->ReleasePrimitiveArrayCritical(env, yuv420sp, yuv, 0);
}
I'm doing this because I haven't found a function that does exactly this and I need one for a MediaCodec decoded buffer. But even if there's one, I'd like to know what can be done to improve my function, just to learn.
UPDATE:
I modified the code based on the answer below in order for it to work with ByteBuffer:
JNIEXPORT void JNICALL Java_com_lucaszanella_mediacodecdecoderexample_YuvToRgb_YUVtoRBGA2(JNIEnv * env, jobject obj, jobject yuv420sp, jint width, jint height, jobject rgbOut)
{
//ITU-R BT.601 conversion
//
// R = 1.164*(Y-16)+1.596*(Cr-128)
// G = 1.164*(Y-16)-0.392*(Cb-128)-0.813*(Cr-128)
// B = 1.164*(Y-16)+2.017*(Cb-128)
//
char *rgbData = (char*)(*env)->GetDirectBufferAddress(env, rgbOut);
char *yuv = (char*)(*env)->GetDirectBufferAddress(env, yuv420sp);
const int size = width * height;
//After width*height luminance values we have the Cr values
const size_t CrBase = size;
//After width*height luminance values + width*height/4 we have the Cb values
const size_t CbBase = size + width*height/4;
for (int i=0; i<size; i++) {
int Y = yuv[i] - 16;
int Cr = yuv[CrBase + i/4] - 128;
int Cb = yuv[CbBase + i/4] - 128;
double R = 1.164*Y+1.596*Cr;
double G = 1.164*Y-0.392*Cb-0.813*Cr;
double B = 1.164*Y+2.017*Cb;
rgbData[i*3] = (R > 255) ? 255 : ((R < 0) ? 0 : R);
rgbData[i*3+1] = (G > 255) ? 255 : ((G < 0) ? 0 : G);
rgbData[i*3+2] = (B > 255) ? 255 : ((B < 0) ? 0 : B);
}
}
however it's crashing. I don't see anything being written out of bounds. Does anyone have any idea?
UPDATE:
The code above works if it is called with a direct ByteBuffer; it won't work if the buffer is not direct (GetDirectBufferAddress then returns NULL).
Added
if (rgbData==NULL) {
__android_log_print(ANDROID_LOG_ERROR, "TRACKERS", "%s", "RGB data null");
}
if (yuv==NULL) {
__android_log_print(ANDROID_LOG_ERROR, "TRACKERS", "%s", "yuv data null");
}
if (rgbData==NULL || yuv==NULL) {
return;
}
for safety.
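For completeness, a minimal sketch of how the buffers would be allocated on the Java side so that GetDirectBufferAddress can see them (the method name YUVtoRBGA2 is inferred from the JNI symbol above; yuvData is whatever decoded frame you have):
// Hypothetical allocation of the direct buffers passed to the native function.
// Non-direct buffers (e.g. ByteBuffer.wrap(array)) have no native address,
// so GetDirectBufferAddress would return NULL for them.
ByteBuffer yuvBuffer = ByteBuffer.allocateDirect(width * height * 3 / 2);
ByteBuffer rgbBuffer = ByteBuffer.allocateDirect(width * height * 3);
yuvBuffer.put(yuvData).rewind(); // yuvData is the decoded YUV420P frame
YUVtoRBGA2(yuvBuffer, width, height, rgbBuffer);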
Anyway, the colors are still not correct.
Is it just me, or shouldn't you be reading from the yuv array and writing to the rgbData array? You actually have them reversed in your implementation.
There's no need to invoke ceil on an integer expression such as i/4. And when you implement an image processing routine, invoking a function call on every pixel is just going to kill performance (been there, done that). Maybe the compiler can optimize it out, but why take that chance.
So change this:
Cr = rgbData[CrBase + ceil(i/4)] - 128;
Cb = rgbData[CbBase + ceil(i/4)] - 128;
To this:
Cr = rgbData[CrBase + i/4] - 128;
Cb = rgbData[CbBase + i/4] - 128;
The only other thing to be wary of is that you may want to clamp R, G, and B to the 8-bit byte range before assigning them to the output array. Those equations can produce results < 0 and > 255.
Another micro-optimization is to declare your variables within the for-loop block so the compiler has more hints for treating them as temporaries, and to declare some of your other values as const. May I suggest:
JNIEXPORT void JNICALL Java_com_example_mediacodecdecoderexample_YuvToRgb_YUVtoRBGA2(JNIEnv * env, jobject obj, jbyteArray yuv420sp, jint width, jint height, jbyteArray rgbOut)
{
//ITU-R BT.601 conversion
//
// R = 1.164*(Y-16)+1.596*(Cr-128)
// G = 1.164*(Y-16)-0.392*(Cb-128)-0.813*(Cr-128)
// B = 1.164*(Y-16)+2.017*(Cb-128)
//
const int size = width * height;
//After width*height luminance values we have the Cr values
const size_t CrBase = size;
//After width*height luminance values + width*height/4 we have the Cb values
const size_t CbBase = size + width*height/4;
jbyte *rgbData = (jbyte*) ((*env)->GetPrimitiveArrayCritical(env, rgbOut, 0));
jbyte* yuv= (jbyte*) (*env)->GetPrimitiveArrayCritical(env, yuv420sp, 0);
for (int i=0; i<size; i++) {
int Y = yuv[i] - 16;
int Cr = yuv[CrBase + i/4] - 128;
int Cb = yuv[CbBase + i/4] - 128;
int R = 1.164*Y+1.596*Cr;
int G = 1.164*Y-0.392*Cb-0.813*Cr;
int B = 1.164*Y+2.017*Cb;
rgbData[i*3] = (R > 255) ? 255 : ((R < 0) ? 0 : R);
rgbData[i*3+1] = (G > 255) ? 255 : ((G < 0) ? 0 : G);
rgbData[i*3+2] = (B > 255) ? 255 : ((B < 0) ? 0 : B);
}
(*env)->ReleasePrimitiveArrayCritical(env, rgbOut, rgbData, 0);
(*env)->ReleasePrimitiveArrayCritical(env, yuv420sp, yuv, 0);
}
Then the only thing left to do is to compile with maximum optimizations enabled. The compiler will take care of the rest.
After that, investigate SIMD optimizations, which some compilers offer as a compiler switch (or enable via pragma).
A small modification to selbie's answer that uses ByteBuffer instead, which is more useful since that's what Java's MediaCodec produces when it decodes.
JNIEXPORT void JNICALL Java_com_example_mediacodecdecoderexample_YuvToRgb_YUVtoRBGA2(JNIEnv * env, jobject obj, jobject yuv420sp, jint width, jint height, jobject rgbOut)
{
//ITU-R BT.601 conversion
//
// R = 1.164*(Y-16)+1.596*(Cr-128)
// G = 1.164*(Y-16)-0.392*(Cb-128)-0.813*(Cr-128)
// B = 1.164*(Y-16)+2.017*(Cb-128)
//
const int size = width * height;
//After width*height luminance values we have the Cr values
const size_t CrBase = size;
//After width*height luminance values + width*height/4 we have the Cb values
const size_t CbBase = size + width*height/4;
jbyte *rgbData = (*env)->GetDirectBufferAddress(env, rgbOut);
jbyte *yuv = (*env)->GetDirectBufferAddress(env, yuv420sp);
for (int i=0; i<size; i++) {
int Y = yuv[i] - 16;
int Cr = yuv[CrBase + i/4] - 128;
int Cb = yuv[CbBase + i/4] - 128;
int R = 1.164*Y+1.596*Cr;
int G = 1.164*Y-0.392*Cb-0.813*Cr;
int B = 1.164*Y+2.017*Cb;
rgbData[i*3] = (R > 255) ? 255 : ((R < 0) ? 0 : R);
rgbData[i*3+1] = (G > 255) ? 255 : ((G < 0) ? 0 : G);
rgbData[i*3+2] = (B > 255) ? 255 : ((B < 0) ? 0 : B);
}
}
Regarding:
*I use the ceiling function (shouldn't it return an int? In my case it's returning a double, why?)*
Here is the prototype:
double ceil(double x);
Notice the return type is double.
See the man page for ceil().
Do not do it yourself! Do not do this directly in C++! The only proper approach is to use hardware acceleration, which will also save a lot of battery.
Basically you can use OpenGL for this, and it will use the hardware on your behalf.
A long, long time ago I did this for iOS, and I'm sure the solution for Android will be quite similar. Sadly I left the code behind (at an old company), so I can't provide example code. If I find something useful I will update this answer. In my code, YUV (and a couple of other color formats) was rendered directly onto an OpenGL view, and OpenGL did the required conversion.
Right now I'm just pointing the finger at OpenGL because the other answers do this directly on the CPU, which is a bad choice: it will drain the battery and you will never achieve the desired performance that way.
Edit:
I've found a similar question on SO with an example:
https://stackoverflow.com/a/17110754/1387438
Disclaimer: I haven't verified that this example is the best approach, but it is a good place to start when looking for better solutions.
If for some reason you need to do this in C++ anyway, drop floating-point operations in favor of operations on integer types.
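For illustration, a rough integer-only version of the BT.601 conversion above (my own sketch; the constants are the usual fixed-point approximations of the coefficients, scaled by 1024):
// Hedged sketch: BT.601 YUV -> RGB using integer math only (coefficients scaled by 1024).
static int toRgb(int y, int cr, int cb) {
    int c = 1192 * (y - 16);                                  // ~1.164 * 1024
    int r = (c + 1634 * (cr - 128)) >> 10;                    // ~1.596 * 1024
    int g = (c - 833 * (cr - 128) - 401 * (cb - 128)) >> 10;  // ~0.813 and ~0.392, * 1024
    int b = (c + 2066 * (cb - 128)) >> 10;                    // ~2.017 * 1024
    r = Math.min(255, Math.max(0, r));
    g = Math.min(255, Math.max(0, g));
    b = Math.min(255, Math.max(0, b));
    return 0xFF000000 | (r << 16) | (g << 8) | b;             // packed ARGB
}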
I want to add a blur feature to my Android photo editor app. So far, I've written the following C++ code to improve speed and efficiency.
class JniBitmap
{
public:
uint32_t* _storedBitmapPixels;
AndroidBitmapInfo _bitmapInfo;
JniBitmap()
{
_storedBitmapPixels = NULL;
}
};
JNIEXPORT void JNICALL Java_com_myapp_utils_NativeBitmapOperations_jniBlurBitmap(JNIEnv * env, jobject obj, jobject handle, uint32_t radius)
{
JniBitmap* jniBitmap = (JniBitmap*) env->GetDirectBufferAddress(handle);
if (jniBitmap->_storedBitmapPixels == NULL) return;
uint32_t width = jniBitmap->_bitmapInfo.width;
uint32_t height = jniBitmap->_bitmapInfo.height;
uint32_t* previousData = jniBitmap->_storedBitmapPixels;
uint32_t* newBitmapPixels = new uint32_t[width * height];
// Array to hold totalRGB
uint8_t totalRGB[3];
uint32_t pPixel_col;
int x, y, kx, ky;
uint8_t tmp;
for (y=0; y<height; y++)
{
for (x=0; x<width; x++)
{
// Colour value RGB
totalRGB[0] = 0.0;
totalRGB[1] = 0.0;
totalRGB[2] = 0.0;
for (ky=-radius; ky<=radius; ky++)
{
for (kx=-radius; kx<=radius; kx++)
{
// Each pixel position
pPixel_col = previousData[(y + ky) * width + x + kx];
totalRGB[0] += (pPixel_col & 0xFF0000) >> 16;
totalRGB[1] += (pPixel_col & 0x00FF00) >> 8;
totalRGB[2] += pPixel_col & 0x0000FF;
}
}
tmp = (radius * 2 + 1) * (radius * 2 + 1);
totalRGB[0] += tmp;
totalRGB[1] += tmp;
totalRGB[2] += tmp;
pPixel_col = totalRGB[0] << 16 + totalRGB[1] << 8 + totalRGB[2];
newBitmapPixels[y * width + x] = pPixel_col;
}
}
delete[] previousData;
jniBitmap->_storedBitmapPixels = newBitmapPixels;
}
I've compiled it successfully using the latest Android NDK version.
In my Android application, I got this Java code to call the native method:
private ByteBuffer handler = null;
static
{
System.loadLibrary("JniBitmapOperationsLibrary");
}
private native void jniBlurBitmap(ByteBuffer handler, final int radius);
public void blurBitmap(final int radius)
{
if (handler == null) return;
jniBlurBitmap(handler, radius);
}
When I try to call it from my application, it gives a blank picture. Did I do something wrong?
PS: I also have crop and scale methods in my JNI files and they work perfectly, so it might be an issue with my blur algorithm.
Finally, I found the fastest way to do this. At first I wrote a JNI routine, but the RenderScript Support v8 way is clearly faster (about 10 times).
Add the library AND the .so files from the directory SDK/build-tools/android-4.4.2/renderscript/lib/: copy renderscript-v8.jar to your libs folder and the contents of the packaged folder (the .so files) to your jni folder.
Then use this code:
public static Bitmap getBlurredBitmap(Context context, Bitmap bm, int radius)
{
if (bm == null) return null;
if (radius < 1) radius = 1;
if (radius > 25) radius = 25;
Bitmap outputBitmap = Bitmap.createBitmap(bm);
RenderScript rs = RenderScript.create(context);
ScriptIntrinsicBlur theIntrinsic = ScriptIntrinsicBlur.create(rs, Element.U8_4(rs));
Allocation tmpIn = Allocation.createFromBitmap(rs, bm);
Allocation tmpOut = Allocation.createFromBitmap(rs, outputBitmap);
theIntrinsic.setRadius(radius);
theIntrinsic.setInput(tmpIn);
theIntrinsic.forEach(tmpOut);
tmpOut.copyTo(outputBitmap);
return outputBitmap;
}
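Usage is then a one-liner (a sketch; context, sourceBitmap and imageView are whatever you have at hand):
// Hypothetical usage: the radius must be between 1 and 25 for ScriptIntrinsicBlur.
Bitmap blurred = getBlurredBitmap(context, sourceBitmap, 12);
imageView.setImageBitmap(blurred);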
I am displaying a live camera preview in a SurfaceView using camera.startPreview(). Any idea how I can get live RGB readings from the camera?
Thanks
I thought I could get the data converted from the SurfaceView, but the best method to use is:
Set the camera's orientation to 90 degrees.
Set output format to NV21 (which is guranteed to be supported on all devices).
Turn the flash on (torch mode).
Start preview in the SurfaceView.
camera = Camera.open();
cameraParam = camera.getParameters();
cameraParam.setPreviewFormat(ImageFormat.NV21);
camera.setDisplayOrientation(90);
camera.setParameters(cameraParam);
cameraParam = camera.getParameters();
camera.setPreviewDisplay(surfaceHolder);
cameraParam.setFlashMode(Parameters.FLASH_MODE_TORCH);
camera.setParameters(cameraParam);
camera.startPreview();
Then I use setPreviewCallback and onPreviewFrame to get each incoming frame and convert it to an RGB pixel array. I can then get the intensity of each color in the picture by averaging over all pixels: I run through the myPixels array in a for loop and read, for example, Color.red(myPixels[i]) for each desired color (inside onPreviewFrame).
camera.setPreviewCallback(new PreviewCallback() {
    @Override
    public void onPreviewFrame(byte[] data, Camera camera) {
        int frameHeight = camera.getParameters().getPreviewSize().height;
        int frameWidth = camera.getParameters().getPreviewSize().width;
        // number of pixels; transforms NV21 pixel data into RGB pixels
        int rgb[] = new int[frameWidth * frameHeight];
        // conversion
        int[] myPixels = decodeYUV420SP(rgb, data, frameWidth, frameHeight);
    }
});
Where decodeYUV420SP is found here.
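As an illustration of the averaging step described above (a sketch only; myPixels comes from the conversion in the callback, and Color is android.graphics.Color):
// Hedged sketch: average the red intensity over the whole frame.
long redSum = 0;
for (int i = 0; i < myPixels.length; i++) {
    redSum += Color.red(myPixels[i]); // same idea with Color.green() / Color.blue()
}
int averageRed = (int) (redSum / myPixels.length);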
I timed this operation to take about 200ms for each frame. Is there a faster way of doing it?
You can do something similar to the code below:
camera.takePicture(shutterCallback, rawCallback, jpegCallback);
jpegCallback = new PictureCallback() {
public void onPictureTaken(byte[] data, Camera camera) {
FileOutputStream outStream = null;
try {
Bitmap bitmap = BitmapFactory.decodeByteArray(data, offset, length);
int[] pix = new int[picw * pich];
bitmap.getPixels(pix, 0, picw, 0, 0, picw, pich);
for (int y = 0; y < pich; y++) {
    for (int x = 0; x < picw; x++) {
        int index = y * picw + x;
        int R = (pix[index] >> 16) & 0xff; // bitwise shifting
        int G = (pix[index] >> 8) & 0xff;
        int B = pix[index] & 0xff;
        pix[index] = 0xff000000 | (R << 16) | (G << 8) | B;
    }
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} finally {
}
}
};
Here the camera.takePicture(shutterCallback, rawCallback, jpegCallback) method is called at image-capture time, so I think you would need to call this method continually while your camera is open.
As requested, here is a fast decoding using the NDK (less than 10 ms on a fast device).
First, here is native.h:
#include <jni.h>
#ifndef native_H
#define native_H
extern "C" {
JNIEXPORT jbyteArray JNICALL Java_com_example_MainActivity_nativeSetIamgeFromCamera(JNIEnv* jenv, jobject obj, jbyteArray array, jint length, jint x, jint y);
};
#endif
native.cpp
#include <stdint.h>
#include <jni.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include "native.h"
void Resize_and_decodyuv(unsigned char * data,int _width,int _height, unsigned char *out, int newWidth, int newHeight);
JNIEXPORT jbyteArray JNICALL Java_com_example_MainActivity_nativeSetIamgeFromCamera(JNIEnv* jenv, jobject obj, jbyteArray array, jint length, jint x, jint y)
{
//-----jbyteArray array contain the data from the camera passed by the java function
//-----length represent the size of jbyteArray in byte
//-----x,y respectively resolutionx and resolutiony of the image in jbyteArray array
unsigned char * buffImgCamera=(unsigned char *)malloc(length);
//----- copy the buffer from java array to c/c++ char * buffImgCamera
jenv->GetByteArrayRegion(array, 0, length, (jbyte*)buffImgCamera);
int width=400,height=600;//screen resolution of the surface (400x600 is just an example)
unsigned char * buffOut=(unsigned char *)malloc(width*height*4);//prepare the result buffer where 4 represent R G B A(Alpha transparency channel).
//--- to gain time i decode and resize the image to fit the surface screen in one loop
Resize_and_decodyuv(buffImgCamera,x,y,buffOut,width,height);
//---copy the result to a jbytearray and return it to java function
jbyteArray result = jenv->NewByteArray(width*height*4);
jenv->SetByteArrayRegion(result, 0, width*height*4, (jbyte*)buffOut);
free(buffImgCamera); // release the temporary buffers
free(buffOut);
return result;
}
void Resize_and_decodyuv(unsigned char * data,int _width,int _height, unsigned char *out, int newWidth, int newHeight)
{
int Colordeep=4;//RGBA; in the case of the Qt framework or Borland just put 3 and the code should work
float scaleWidth = (float)newWidth / (float)_width;
float scaleHeight = (float)newHeight / (float)_height;
int data_uv = _width * _height; // offset where the interleaved VU (NV21) plane starts in the input buffer
for(int cy = 0; cy < newHeight; cy++)
{
for(int cx = 0; cx < newWidth; cx++)
{
int pixel = (cy * (newWidth *Colordeep)) + (cx*Colordeep);
int nearestMatch = ((((int)(cy / scaleHeight)) *_width) + (int)(cx /scaleWidth));
int cxa=cx/scaleWidth;
int cya=cy/scaleHeight; cya/=2;
int nearestMatch1 =(cya *_width) + (int)(cxa);
int y = ( data[nearestMatch]);
int v = data[data_uv+(nearestMatch1)];
int u = data[data_uv+(nearestMatch1)+1];
int r = (int) (1164 * (y - 16) + 1596 * (v - 128));
int g = (int) (1164 * (y - 16) - 813 * (v - 128) - 391 * (u - 128));
int b = (int) (1164 * (y - 16) + 2018 * (u - 128));
r/=1000;
g/=1000;
b/=1000;
r = r < 0 ? 0 : (r > 255 ? 255 : r);
g = g < 0 ? 0 : (g > 255 ? 255 : g);
b = b < 0 ? 0 : (b > 255 ? 255 : b);
out[pixel ] = r;
out[pixel +1 ] = g;
out[pixel + 2] = b;
if(Colordeep==4)out[pixel + 3] = 255;
}
}
}
Java code:
PreviewCallback previewCallback = new PreviewCallback () {
public void onPreviewFrame(byte[] data, Camera camera) {
//nativeSetIamgeFromCamera returns a byte array (the RGBA pixels)
nativeSetIamgeFromCamera(data,data.length,camera.getParameters().getPreviewSize().width,camera.getParameters().getPreviewSize().height);
}
};
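For completeness, the matching Java-side declaration would look roughly like this (a sketch; the class, package and library names are assumptions based on the JNI symbol above):
// Hypothetical Java-side declaration matching the native symbol above
// (assumes class MainActivity in package com.example and an NDK module named "native").
static {
    System.loadLibrary("native"); // use whatever your NDK module is actually called
}
private native byte[] nativeSetIamgeFromCamera(byte[] data, int length, int x, int y);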
I use the Android NDK to set the color of individual pixels. It looks something like this:
typedef struct {
uint8_t red;
uint8_t green;
uint8_t blue;
uint8_t alpha;
} rgba;
JNIEXPORT void JNICALL Java_com_package_jniBmpTest(JNIEnv* env, jobject obj, jobject bitmapIn, jobject bitmapOut) {
AndroidBitmapInfo infoIn;
void* pixelsIn;
AndroidBitmapInfo infoOut;
void* pixelsOut;
int ret;
if ((ret = AndroidBitmap_getInfo(env, bitmapIn, &infoIn)) < 0 ||
(ret = AndroidBitmap_getInfo(env, bitmapOut, &infoOut)) < 0) {
return;
}
if (infoIn.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
infoOut.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
return;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmapIn, &pixelsIn)) < 0 ||
(ret = AndroidBitmap_lockPixels(env, bitmapOut, &pixelsOut)) < 0) {
LOGE("Error! %d", ret);
}
rgba* input = (rgba*) pixelsIn;
rgba* output = (rgba*) pixelsOut;
int w = infoIn.width;
int h = infoIn.height;
int n;
for (n = 0; n < w * h; n++) {
output[n].red = input[n].red;
output[n].green = input[n].green;
output[n].blue = input[n].blue;
output[n].alpha = 127;
}
AndroidBitmap_unlockPixels(env, bitmapIn);
AndroidBitmap_unlockPixels(env, bitmapOut);
}
I need to make the bitmap semi-transparent (this is a simplified example; my real code is much more complicated, but the bug exists in this code too).
The problem is that instead of a semi-transparent bitmap I get an image with corrupted colors. It is semi-transparent, but the colors are wrong (for example, white becomes black, blue becomes green...). What could be the problem?
Thanks for the help.
Fixed it. Sorry for asking a question and then answering it myself five minutes later :)
The catch is that Android bitmaps store pixels with premultiplied alpha, so each color channel also has to be multiplied by the alpha value, not just the alpha channel set. The solution looks like this:
float alpha;
alpha = 0.5;
output[n].red = (int) (input[n].red * alpha);
output[n].green = (int) (input[n].green * alpha);
output[n].blue = (int) (input[n].blue * alpha);
output[n].alpha = (int) (255 * alpha);
My problem: I've set up a camera on Android and receive the preview data through an onPreviewFrame listener, which passes me a byte[] array containing the image data in the default Android YUV format (the device does not support the R5G6B5 format). Each pixel takes 12 bits, which makes things a little tricky. Now what I want to do is convert the YUV data into ARGB data in order to do image processing with it. This has to be done with RenderScript in order to maintain high performance.
My idea was to pass two pixels in one element (which would be 24 bits = 3 bytes) and then return two ARGB pixels. The problem is that in RenderScript a u8_3 (a 3-component 8-bit vector) is stored in 32 bits, which means the last 8 bits are unused. But when copying the image data into the allocation, all 32 bits are used, so the last 8 bits get lost. Even if I used 32-bit input data, the last 8 bits would be useless, because they're only 2/3 of a pixel. When defining an element consisting of a 3-byte array, it actually has a real size of 3 bytes, but then the Allocation.copyFrom() method doesn't fill the input Allocation with data, arguing that it doesn't have the right data type to be filled with a byte[].
The RenderScript documentation states that there is a ScriptIntrinsicYuvToRGB, which should do exactly that, in API Level 17. But in fact the class doesn't exist. I've downloaded API Level 17, even though it no longer seems to be downloadable. Does anyone have any information about it? Has anyone ever tried out a ScriptIntrinsic?
So in conclusion my question is: how do I convert the camera data into ARGB data quickly and hardware-accelerated?
That's how to do it in Dalvik VM (found the code somewhere online, it works):
@SuppressWarnings("unused")
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0)
y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0)
r = 0;
else if (r > 262143)
r = 262143;
if (g < 0)
g = 0;
else if (g > 262143)
g = 262143;
if (b < 0)
b = 0;
else if (b > 262143)
b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
I'm sure you will find the LivePreview test application interesting ... it's part of the Android source code in the latest Jelly Bean (MR1). It implements a camera preview and uses ScriptIntrinsicYuvToRgb to convert the preview data with Renderscript. You can browse the source online here:
LivePreview
I was not able to get ScriptIntrinsicYuvToRgb running, so I decided to write my own RenderScript solution.
Here is the ready script (named yuv.rs):
#pragma version(1)
#pragma rs java_package_name(com.package.name)
rs_allocation gIn;
int width;
int height;
int frameSize;
void yuvToRgb(const uchar *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) {
uchar yp = rsGetElementAtYuv_uchar_Y(gIn, x, y) & 0xFF;
int index = frameSize + (x & (~1)) + (( y>>1) * width );
int v = (int)( rsGetElementAt_uchar(gIn, index) & 0xFF ) -128;
int u = (int)( rsGetElementAt_uchar(gIn, index+1) & 0xFF ) -128;
int r = (int) (1.164f * yp + 1.596f * v );
int g = (int) (1.164f * yp - 0.813f * v - 0.391f * u);
int b = (int) (1.164f * yp + 2.018f * u );
r = r>255? 255 : r<0 ? 0 : r;
g = g>255? 255 : g<0 ? 0 : g;
b = b>255? 255 : b<0 ? 0 : b;
uchar4 res4;
res4.r = (uchar)r;
res4.g = (uchar)g;
res4.b = (uchar)b;
res4.a = 0xFF;
*v_out = res4;
}
Don't forget to set camera preview format to NV21:
Parameters cameraParameters = camera.getParameters();
cameraParameters.setPreviewFormat(ImageFormat.NV21);
// Other camera init stuff: preview size, framerate, etc.
camera.setParameters(cameraParameters);
Allocations initialization and script usage:
// Somewhere in initialization section
// w and h are variables for selected camera preview size
rs = RenderScript.create(this);
Type.Builder tbIn = new Type.Builder(rs, Element.U8(rs));
tbIn.setX(w);
tbIn.setY(h);
tbIn.setYuvFormat(ImageFormat.NV21);
Type.Builder tbOut = new Type.Builder(rs, Element.RGBA_8888(rs));
tbOut.setX(w);
tbOut.setY(h);
inData = Allocation.createTyped(rs, tbIn.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT | Allocation.USAGE_SHARED);
outData = Allocation.createTyped(rs, tbOut.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT | Allocation.USAGE_SHARED);
outputBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
yuvScript = new ScriptC_yuv(rs);
yuvScript.set_gIn(inData);
yuvScript.set_width(w);
yuvScript.set_height(h);
yuvScript.set_frameSize(previewSize);
//.....
Camera callback method:
public void onPreviewFrame(byte[] data, Camera camera) {
// In your camera callback, data contains the NV21 preview frame
inData.copyFrom(data);
yuvScript.forEach_yuvToRgb(inData, outData);
outData.copyTo(outputBitmap);
// draw your bitmap where you want to
// .....
}
For anyone who didn't know, RenderScript is now in the Android Support Library, including intrinsics.
http://android-developers.blogspot.com.au/2013/09/renderscript-in-android-support-library.html
http://android-developers.blogspot.com.au/2013/08/renderscript-intrinsics.html
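For reference, a minimal sketch of the support-library intrinsic doing the NV21-to-RGB conversion (a sketch only; nv21Data, width and height are assumed to come from your preview callback, and the classes are imported from android.support.v8.renderscript):
// Hedged sketch: NV21 -> ARGB bitmap with ScriptIntrinsicYuvToRGB from the support library.
RenderScript rs = RenderScript.create(context);
ScriptIntrinsicYuvToRGB yuvToRgb = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));

// Input: the raw NV21 bytes; output: an RGBA allocation the size of the frame.
Type yuvType = new Type.Builder(rs, Element.U8(rs)).setX(nv21Data.length).create();
Allocation in = Allocation.createTyped(rs, yuvType);
Type rgbaType = new Type.Builder(rs, Element.RGBA_8888(rs)).setX(width).setY(height).create();
Allocation out = Allocation.createTyped(rs, rgbaType);

in.copyFrom(nv21Data);   // NV21 bytes from onPreviewFrame
yuvToRgb.setInput(in);
yuvToRgb.forEach(out);   // run the intrinsic

Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
out.copyTo(bitmap);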
We now have the renderscript-intrinsics-replacement-toolkit to do this. First, build and import the renderscript module into your project and add it as a dependency of your app module. Then go to Toolkit.kt and add the following:
fun toNv21(image: Image): ByteArray? {
val nv21 = ByteArray((image.width * image.height * 1.5f).toInt())
return if (!nativeYuv420toNv21(
nativeHandle,
image.width,
image.height,
image.planes[0].buffer, // Y buffer
image.planes[1].buffer, // U buffer
image.planes[2].buffer, // V buffer
image.planes[0].pixelStride, // Y pixel stride
image.planes[1].pixelStride, // U/V pixel stride
image.planes[0].rowStride, // Y row stride
image.planes[1].rowStride, // U/V row stride
nv21
)
) {
null
} else nv21
}
private external fun nativeYuv420toNv21(
nativeHandle: Long,
imageWidth: Int,
imageHeight: Int,
yByteBuffer: ByteBuffer,
uByteBuffer: ByteBuffer,
vByteBuffer: ByteBuffer,
yPixelStride: Int,
uvPixelStride: Int,
yRowStride: Int,
uvRowStride: Int,
nv21Output: ByteArray
): Boolean
Now, go to JniEntryPoints.cpp and add the following:
extern "C" JNIEXPORT jboolean JNICALL Java_com_google_android_renderscript_Toolkit_nativeYuv420toNv21(
JNIEnv *env, jobject/*thiz*/, jlong native_handle,
jint image_width, jint image_height, jobject y_byte_buffer,
jobject u_byte_buffer, jobject v_byte_buffer, jint y_pixel_stride,
jint uv_pixel_stride, jint y_row_stride, jint uv_row_stride,
jbyteArray nv21_array) {
auto y_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(y_byte_buffer));
auto u_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(u_byte_buffer));
auto v_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(v_byte_buffer));
jbyte* nv21 = env->GetByteArrayElements(nv21_array, nullptr);
if (nv21 == nullptr || y_buffer == nullptr || u_buffer == nullptr
|| v_buffer == nullptr) {
// Log this.
return false;
}
RenderScriptToolkit* toolkit = reinterpret_cast<RenderScriptToolkit*>(native_handle);
toolkit->yuv420toNv21(image_width, image_height, y_buffer, u_buffer, v_buffer,
y_pixel_stride, uv_pixel_stride, y_row_stride, uv_row_stride,
nv21);
env->ReleaseByteArrayElements(nv21_array, nv21, 0);
return true;
}
Go to YuvToRgb.cpp and add the following:
void RenderScriptToolkit::yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
int uv_pixel_stride, int y_row_stride, int uv_row_stride,
int8_t *nv21) {
// Copy Y channel.
for(int y = 0; y < image_height; ++y) {
int destOffset = image_width * y;
int yOffset = y * y_row_stride;
memcpy(nv21 + destOffset, y_buffer + yOffset, image_width);
}
if (v_buffer - u_buffer == sizeof(int8_t)) {
// format = nv21
// TODO: If the format is VUVUVU & pixel stride == 1 we can simplify the copy
// with memcpy. In Android Camera2 I have mostly come across UVUVUV packaging
// though.
}
// Copy UV Channel.
int idUV = image_width * image_height;
int uv_width = image_width / 2;
int uv_height = image_height / 2;
for(int y = 0; y < uv_height; ++y) {
int uvOffset = y * uv_row_stride;
for (int x = 0; x < uv_width; ++x) {
int bufferIndex = uvOffset + (x * uv_pixel_stride);
// V channel.
nv21[idUV++] = v_buffer[bufferIndex];
// U channel.
nv21[idUV++] = u_buffer[bufferIndex];
}
}
}
Finally, go to RenderscriptToolkit.h and add the following:
/**
* https://blog.minhazav.dev/how-to-use-renderscript-to-convert-YUV_420_888-yuv-image-to-bitmap/#tobitmapimage-image-method
* @param image_width width of the image you want to convert to byte array
* @param image_height height of the image you want to convert to byte array
* @param y_buffer Y buffer
* @param u_buffer U buffer
* @param v_buffer V buffer
* @param y_pixel_stride Y pixel stride
* @param uv_pixel_stride UV pixel stride
* @param y_row_stride Y row stride
* @param uv_row_stride UV row stride
* @param nv21 the output byte array
*/
void yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
int uv_pixel_stride, int y_row_stride, int uv_row_stride,
int8_t *nv21);
You are now ready to harness the full power of RenderScript. Below is an example with the ARCore camera Image object (replace the first line with whatever code gives you your camera Image):
val cameraImage = arFrame.frame.acquireCameraImage()
val width = cameraImage.width
val height = cameraImage.height
val byteArray = Toolkit.toNv21(cameraImage)
byteArray?.let {
    Toolkit.yuvToRgbBitmap(
        byteArray,
        width,
        height,
        YuvFormat.NV21
    ).let { bitmap ->
        saveBitmapToDevice(
            name,
            session,
            bitmap,
            context
        )
    }
}