Android JNI Lens blur

I want to add a blur feature to my Android photo editor app. So far, I've written the following C++ code to improve speed and efficiency.
class JniBitmap
{
public:
uint32_t* _storedBitmapPixels;
AndroidBitmapInfo _bitmapInfo;
JniBitmap()
{
_storedBitmapPixels = NULL;
}
};
JNIEXPORT void JNICALL Java_com_myapp_utils_NativeBitmapOperations_jniBlurBitmap(JNIEnv * env, jobject obj, jobject handle, uint32_t radius)
{
JniBitmap* jniBitmap = (JniBitmap*) env->GetDirectBufferAddress(handle);
if (jniBitmap->_storedBitmapPixels == NULL) return;
uint32_t width = jniBitmap->_bitmapInfo.width;
uint32_t height = jniBitmap->_bitmapInfo.height;
uint32_t* previousData = jniBitmap->_storedBitmapPixels;
uint32_t* newBitmapPixels = new uint32_t[width * height];
// Array to hold totalRGB
uint8_t totalRGB[3];
uint32_t Pixel_col;
int x, y, kx, ky;
uint8_t tmp;
for (y=0; y<height; y++)
{
for (x=0; x<width; x++)
{
// Colour value RGB
totalRGB[0] = 0.0;
totalRGB[1] = 0.0;
totalRGB[2] = 0.0;
for (ky=-radius; ky<=radius; ky++)
{
for (kx=-radius; kx<=radius; kx++)
{
// Each pixel position
Pixel_col = previousData[(y + ky) * width + x + kx];
totalRGB[0] += (Pixel_col & 0xFF0000) >> 16;
totalRGB[1] += (Pixel_col & 0x00FF00) >> 8;
totalRGB[2] += Pixel_col & 0x0000FF;
}
}
tmp = (radius * 2 + 1) * (radius * 2 + 1);
totalRGB[0] += tmp;
totalRGB[1] += tmp;
totalRGB[2] += tmp;
Pixel_col = totalRGB[0] << 16 + totalRGB[1] << 8 + totalRGB[2];
newBitmapPixels[y * width + x] = Pixel_col;
}
}
delete[] previousData;
jniBitmap->_storedBitmapPixels = newBitmapPixels;
}
I've compiled it successfully using the latest Android NDK.
In my Android application, I use the following Java code to call the native method:
private ByteBuffer handler = null;
static
{
System.loadLibrary("JniBitmapOperationsLibrary");
}
private native void jniBlurBitmap(ByteBuffer handler, final int radius);
public void blurBitmap(final int radius)
{
if (handler == null) return;
jniBlurBitmap(handler, radius);
}
When I call it from my application, I get a blank picture. Did I do something wrong?
PS: I also have crop and scale methods in my JNI files, and they work perfectly, so the issue is probably in my blur algorithm.

In the end, I found the fastest way to do this. At first I wrote a JNI implementation, but the Android Support v8 RenderScript approach is clearly faster (about 10 times).
Add both the library and the .so files from the directory SDK/build-tools/android-4.4.2/renderscript/lib/: copy renderscript-v8.jar into your libs folder and the packaged .so files into your jni folder.
Then use this code:
public static Bitmap getBlurredBitmap(Context context, Bitmap bm, int radius)
{
if (bm == null) return null;
if (radius < 1) radius = 1;
if (radius > 25) radius = 25;
Bitmap outputBitmap = Bitmap.createBitmap(bm);
RenderScript rs = RenderScript.create(context);
ScriptIntrinsicBlur theIntrinsic = ScriptIntrinsicBlur.create(rs, Element.U8_4(rs));
Allocation tmpIn = Allocation.createFromBitmap(rs, bm);
Allocation tmpOut = Allocation.createFromBitmap(rs, outputBitmap);
theIntrinsic.setRadius(radius);
theIntrinsic.setInput(tmpIn);
theIntrinsic.forEach(tmpOut);
tmpOut.copyTo(outputBitmap);
return outputBitmap;
}
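For reference, here is a minimal sketch of how the helper might be called (the drawable resource and the imageView are placeholders, not part of the original code). The radius is clamped to 25 above because ScriptIntrinsicBlur only accepts radii between 0 and 25:
Bitmap source = BitmapFactory.decodeResource(getResources(), R.drawable.photo);
Bitmap blurred = getBlurredBitmap(this, source, 15); // any radius outside 1..25 gets clamped
imageView.setImageBitmap(blurred);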

Related

NDK performance

I've just started to play around with the NDK to explore the sweet performance boost that I've been promised. To get a feel for the difference, I tried a dumb number-crunching task (rendering a Mandelbrot set to a bitmap) and compared it to a Java version of the same code. To my great surprise, the C version is significantly slower (5.0 seconds vs. 1.6 seconds on my HTC One, on average). Stranger still, the cost isn't the overhead of making a native call; the actual number-crunching itself takes longer.
This can't be right, can it? What did I miss?
C version (debug timer code removed):
const int MAX_ITER = 63;
const float MAX_DEPTH = 16;
static uint16_t rgb565(int red, int green, int blue)
{
return (uint16_t)(((red << 8) & 0xf800) | ((green << 2) & 0x03e0) | ((blue >> 3) & 0x001f));
}
float zAbs(float re, float im) {
return re*re + im*im;
}
int depth(float cRe, float cIm) {
int i=0;
float re, im;
float zRe = 0.0f;
float zIm = 0.0f;
while ((zAbs(zRe, zIm) < MAX_DEPTH) && (i < MAX_ITER)) {
re = zRe * zRe - zIm * zIm + cRe;
im = 2.0f * zRe * zIm + cIm;
zRe = re;
zIm = im;
i++;
}
return i;
}
extern "C"
void Java_com_example_ndktest_MainActivity_renderFractal(JNIEnv* env, jobject thiz, jobject bitmap, float re0, float im0, float b)
{
AndroidBitmapInfo info;
void* pixels;
int ret;
if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
LOGE("AndroidBitmap_getInfo() failed ! error=%d", ret);
return;
}
if (info.format != ANDROID_BITMAP_FORMAT_RGB_565) {
LOGE("Bitmap format is not RGB_565 !");
return;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmap, &pixels)) < 0) {
LOGE("AndroidBitmap_lockPixels() failed ! error=%d", ret);
}
int w = info.width;
int h = info.height;
float re, im;
int z = 0;
uint16_t* px = (uint16_t*)pixels;
for(int y=0; y<h; y++) {
im = im0 + b*((float)y/(float)h);
for(int x=0; x<info.width; x++) {
re = re0 + b*((float)x/(float)w);
z = depth(re, im);
px[y*w + x] = rgb565(0, z*4, z * 16);
}
}
AndroidBitmap_unlockPixels(env, bitmap);
}
Java version:
private static final int MAX_ITER = 63;
private static final float MAX_DEPTH = 16;
static int rgb565(int red, int green, int blue)
{
return ((red << 8) & 0xf800) | ((green << 2) & 0x03e0) | ((blue >> 3) & 0x001f);
}
static float zAbs(float re, float im) {
return re*re + im*im;
}
static int depth(float cRe, float cIm) {
int i=0;
float re, im;
float zRe = 0.0f;
float zIm = 0.0f;
while ((zAbs(zRe, zIm) < MAX_DEPTH) && (i < MAX_ITER)) {
re = zRe * zRe - zIm * zIm + cRe;
im = 2.0f * zRe * zIm + cIm;
zRe = re;
zIm = im;
i++;
}
return i;
}
static void renderFractal(Bitmap bitmap, float re0, float im0, float b)
{
int w = bitmap.getWidth();
int h = bitmap.getHeight();
int[] pixels = new int[w * h];
bitmap.getPixels(pixels, 0, w, 0, 0, w, h);
float re, im;
int z = 0;
for(int y=0; y<h; y++) {
im = im0 + b*((float)y/(float)h);
for(int x=0; x<w; x++) {
re = re0 + b*((float)x/(float)w);
z = depth(re, im);
pixels[y*w + x] = rgb565(0, z*4, z * 16);
}
}
bitmap.setPixels(pixels, 0, w, 0, 0, w, h);
}
As noted in the comments, this was because the NDK code was built for the armeabi target rather than the armeabi-v7a target. The former is intended to work across a broad range of hardware, including devices without floating-point hardware, so it does all floating-point calculations in software.
Building for armeabi-v7a enables the VFP instructions, so anything that relies heavily on floating point calculations will speed up dramatically.
If you build exclusively for armeabi-v7a, you will exclude a fairly broad selection of devices, even relatively recent ones (e.g. the Samsung Galaxy Ace). These devices have VFP support, but the CPU is based on the ARMv6 instruction set rather than ARMv7. There is no "pre-ARMv7 CPU with VFP" build target, so you have to build for armeabi, or use custom build rules and careful selection of supported devices.
At the other end of the spectrum, you may get a small performance boost by specifying hard-float ABI within your armeabi-v7a library (-mhard-float -- requires NDK r9b).
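As a concrete illustration, here is a minimal Application.mk sketch for the ndk-build toolchain (assuming that is the build system in use; the APP_OPTIM line is optional):
# Build both targets so ARMv6 devices still get a (slower, soft-float) library
APP_ABI := armeabi armeabi-v7a
APP_OPTIM := release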
FWIW, one of the selling points of just-in-time compilers like the one in Dalvik is that they can recognize the system capabilities and adapt code generation appropriately.

Get RGB from a SurfaceView displaying Live camera

I am displaying a live camera preview in a SurfaceView using camera.startPreview(). Any idea how I can get live RGB readings from the camera?
Thanks
I thought I could get the converted data from the SurfaceView, but the best method turned out to be:
Set the camera's display orientation to 90 degrees.
Set the output format to NV21 (which is guaranteed to be supported on all devices).
Turn the flash on (torch mode).
Start the preview on the SurfaceView.
camera = Camera.open();
cameraParam = camera.getParameters();
cameraParam.setPreviewFormat(ImageFormat.NV21);
camera.setDisplayOrientation(90);
camera.setParameters(cameraParam);
cameraParam = camera.getParameters();
camera.setPreviewDisplay(surfaceHolder);
cameraParam.setFlashMode(Parameters.FLASH_MODE_TORCH);
camera.setParameters(cameraParam);
camera.startPreview();
Then I call setPreviewCallback and, in onPreviewFrame, grab the incoming frame and convert it to an RGB pixel array. I can then get the intensity of each color in the picture by averaging the intensity of all pixels: run the myPixels array through a for loop and check Color.red(myPixels[i]) (and the other channels) for each desired color, inside onPreviewFrame; a sketch of that averaging step follows the snippet below.
camera.setPreviewCallback(new PreviewCallback() {
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
int frameHeight = camera.getParameters().getPreviewSize().height;
int frameWidth = camera.getParameters().getPreviewSize().width;
// transforms the NV21 pixel data into an array of RGB pixels
int rgb[] = new int[frameWidth * frameHeight];
// conversion
int[] myPixels = decodeYUV420SP(rgb, data, frameWidth, frameHeight);
}
});
Where decodeYUV420SP is found here.
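For illustration, here is a minimal sketch of the averaging step described above (the variable names are mine, not from the original code):
// Average the red channel over the whole frame; do the same for green and blue.
long redSum = 0;
for (int i = 0; i < myPixels.length; i++) {
    redSum += Color.red(myPixels[i]);
}
int averageRed = (int) (redSum / myPixels.length);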
I timed this conversion at about 200 ms per frame. Is there a faster way of doing it?
You can do something similar to the code below:
camera.takePicture(shutterCallback, rawCallback, jpegCallback);
jpegCallback = new PictureCallback() {
public void onPictureTaken(byte[] data, Camera camera) {
Bitmap bitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
int picw = bitmap.getWidth();
int pich = bitmap.getHeight();
int[] pix = new int[picw * pich];
bitmap.getPixels(pix, 0, picw, 0, 0, picw, pich);
for (int y = 0; y < pich; y++) {
for (int x = 0; x < picw; x++) {
int index = y * picw + x;
int R = (pix[index] >> 16) & 0xff; // bitwise shifting
int G = (pix[index] >> 8) & 0xff;
int B = pix[index] & 0xff;
pix[index] = 0xff000000 | (R << 16) | (G << 8) | B;
}
}
}
};
Here, camera.takePicture(shutterCallback, rawCallback, jpegCallback) is called at image-capture time, so I think you need to keep calling this method continually while your camera is open.
As requested, here is a fast decoding routine using the NDK (less than 10 ms on a fast device).
First, here is native.h:
#include <jni.h>
#ifndef native_H
#define native_H
extern "C" {
JNIEXPORT jbyteArray JNICALL Java_com_example_MainActivity_nativeSetIamgeFromCamera(JNIEnv* jenv, jobject obj, jbyteArray array, jint length, jint x, jint y);
};
#endif
native.cpp
#include <stdint.h>
#include <jni.h>
#include <stdlib.h>
#include <string.h>
#include <stdio.h>
#include <android/native_window.h>
#include <android/native_window_jni.h>
#include <sys/types.h>
#include <stdio.h>
#include <string.h>
#include <sys/types.h>
#include <time.h>
#include "native.h"
void Resize_and_decodyuv(unsigned char * data,int _width,int _height, unsigned char *out, int newWidth, int newHeight);
JNIEXPORT jbyteArray JNICALL Java_com_example_MainActivity_nativeSetIamgeFromCamera(JNIEnv* jenv, jobject obj, jbyteArray array, jint length, jint x, jint y)
{
//-----jbyteArray array contain the data from the camera passed by the java function
//-----length represent the size of jbyteArray in byte
//-----x,y respectively resolutionx and resolutiony of the image in jbyteArray array
unsigned char * buffImgCamera = (unsigned char *)malloc(length);
//----- copy the buffer from the java array into the c/c++ buffer buffImgCamera
jenv->GetByteArrayRegion(array, 0, length, (jbyte*)buffImgCamera);
int width = 400, height = 600; //screen resolution of the surface (400x600 is just an example)
unsigned char * buffOut = (unsigned char *)malloc(width * height * 4); //result buffer, 4 bytes per pixel: R G B A (alpha/transparency channel)
//--- to save time, decode and resize the image to fit the surface in a single loop
Resize_and_decodyuv(buffImgCamera, x, y, buffOut, width, height);
//--- copy the result into a jbyteArray and return it to the java caller
jbyteArray result = jenv->NewByteArray(width * height * 4);
jenv->SetByteArrayRegion(result, 0, width * height * 4, (jbyte*)buffOut);
free(buffImgCamera);
free(buffOut);
return result;
}
void Resize_and_decodyuv(unsigned char * data,int _width,int _height, unsigned char *out, int newWidth, int newHeight)
{
int Colordeep = 4; //RGBA; for frameworks that use 3-byte RGB (e.g. Qt or Borland) just put 3 and the code should work
float scaleWidth = (float)newWidth / (float)_width;
float scaleHeight = (float)newHeight / (float)_height;
int data_uv = _width * _height; // offset of the interleaved VU plane in the NV21 buffer
for(int cy = 0; cy < newHeight; cy++)
{
for(int cx = 0; cx < newWidth; cx++)
{
int pixel = (cy * (newWidth *Colordeep)) + (cx*Colordeep);
int nearestMatch = ((((int)(cy / scaleHeight)) *_width) + (int)(cx /scaleWidth));
int cxa=cx/scaleWidth;
int cya=cy/scaleHeight; cya/=2;
int nearestMatch1 =(cya *_width) + (int)(cxa);
int y = ( data[nearestMatch]);
int v = data[data_uv+(nearestMatch1)];
int u = data[data_uv+(nearestMatch1)+1];
int r = (int) (1164 * (y - 16) + 1596 * (v - 128));
int g = (int) (1164 * (y - 16) - 813 * (v - 128) - 391 * (u - 128));
int b = (int) (1164 * (y - 16) + 2018 * (u - 128));
r/=1000;
g/=1000;
b/=1000;
r = r < 0 ? 0 : (r > 255 ? 255 : r);
g = g < 0 ? 0 : (g > 255 ? 255 : g);
b = b < 0 ? 0 : (b > 255 ? 255 : b);
out[pixel ] = r;
out[pixel +1 ] = g;
out[pixel + 2] = b;
if(Colordeep==4)out[pixel + 3] = 255;
}
}
}
java code
PreviewCallback previewCallback = new PreviewCallback () {
public void onPreviewFrame(byte[] data, Camera camera) {
//nativeSetIamgeFromCamera returns a byte array of RGBA pixel data
nativeSetIamgeFromCamera(data,data.length,camera.getParameters().getPreviewSize().width,camera.getParameters().getPreviewSize().height);
}
};
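To display the result, the returned RGBA bytes can be wrapped into a Bitmap. A minimal sketch (the 400x600 size must match whatever is hard-coded in the native function, and previewWidth/previewHeight are the camera preview dimensions):
byte[] rgbaBytes = nativeSetIamgeFromCamera(data, data.length, previewWidth, previewHeight);
// ARGB_8888 bitmaps store their pixels as R,G,B,A bytes in memory, matching the native output order
Bitmap bitmap = Bitmap.createBitmap(400, 600, Bitmap.Config.ARGB_8888);
bitmap.copyPixelsFromBuffer(ByteBuffer.wrap(rgbaBytes));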

Corrupted colors after Bitmap processing

I use Android NDK to set color for individual pixel. It looks something like that:
typedef struct {
uint8_t red;
uint8_t green;
uint8_t blue;
uint8_t alpha;
} rgba;
JNIEXPORT void JNICALL Java_com_package_jniBmpTest(JNIEnv* env, jobject obj, jobject bitmapIn, jobject bitmapOut) {
AndroidBitmapInfo infoIn;
void* pixelsIn;
AndroidBitmapInfo infoOut;
void* pixelsOut;
int ret;
if ((ret = AndroidBitmap_getInfo(env, bitmapIn, &infoIn)) < 0 ||
(ret = AndroidBitmap_getInfo(env, bitmapOut, &infoOut)) < 0) {
return;
}
if (infoIn.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
infoOut.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
return;
}
if ((ret = AndroidBitmap_lockPixels(env, bitmapIn, &pixelsIn)) < 0 ||
(ret = AndroidBitmap_lockPixels(env, bitmapOut, &pixelsOut)) < 0) {
LOGE("Error! %d", ret);
}
rgba* input = (rgba*) pixelsIn;
rgba* output = (rgba*) pixelsOut;
int w = infoIn.width;
int h = infoIn.height;
int n;
for (n = 0; n < w * h; n++) {
output[n].red = input[n].red;
output[n].green = input[n].green;
output[n].blue = input[n].blue;
output[n].alpha = 127;
}
AndroidBitmap_unlockPixels(env, bitmapIn);
AndroidBitmap_unlockPixels(env, bitmapOut);
}
I need to make the bitmap semi-transparent (this is a simplified example; my real code is much more complicated, but the bug exists in this code, too).
The problem is that instead of a semi-transparent bitmap I get an image with corrupted colors. It is semi-transparent, too, but the colors are not correct (for example, white becomes black, blue becomes green...). What could the problem be?
Thanks for the help.
Fixed it. Sorry for asking a question and then answering it five minutes later :)
The color channels have to be premultiplied by the alpha value (Android's RGBA_8888 bitmaps expect premultiplied alpha), so the solution looks like:
float alpha;
alpha = 0.5;
output[n].red = (int) (input[n].red * alpha);
output[n].green = (int) (input[n].green * alpha);
output[n].blue = (int) (input[n].blue * alpha);
output[n].alpha = (int) (255 * alpha);
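In context, a minimal sketch of the corrected copy loop, using the same rgba struct and variables as in the question:
float alpha = 0.5f;
for (n = 0; n < w * h; n++) {
    // premultiply each color channel by the alpha value before storing it
    output[n].red = (uint8_t) (input[n].red * alpha);
    output[n].green = (uint8_t) (input[n].green * alpha);
    output[n].blue = (uint8_t) (input[n].blue * alpha);
    output[n].alpha = (uint8_t) (255 * alpha);
}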

Converting camera YUV-data to ARGB with renderscript

My problem is: I've set up a camera in Android and receive the preview data via an onPreviewFrame listener, which passes me a byte[] containing the image data in the default Android YUV format (the device does not support the R5G6B5 format). Each pixel consists of 12 bits, which makes things a little tricky. Now I want to convert the YUV data into ARGB data in order to do image processing with it. This has to be done with RenderScript in order to maintain high performance.
My idea was to pass two pixels in one element (which would be 24 bits = 3 bytes) and then return two ARGB pixels. The problem is that in RenderScript a u8_3 (a three-dimensional 8-bit vector) is stored in 32 bits, which means the last 8 bits are unused. But when copying the image data into the allocation, all 32 bits are used, so the last 8 bits get lost. Even if I used 32-bit input data, the last 8 bits would be useless, because they're only 2/3 of a pixel. When defining an element consisting of a 3-byte array, it actually has a real size of 3 bytes, but then the Allocation.copyFrom() method doesn't fill the input Allocation with data, complaining that it doesn't have the right data type to be filled with a byte[].
The RenderScript documentation states that there is a ScriptIntrinsicYuvToRGB which should do exactly that in API level 17. But in fact the class doesn't exist. I've downloaded API level 17, even though it seems not to be downloadable any more. Does anyone have any information about it? Has anyone ever tried out a ScriptIntrinsic?
So in conclusion, my question is: how can I convert the camera data to ARGB quickly and hardware-accelerated?
This is how to do it in the Dalvik VM (I found the code somewhere online, and it works):
@SuppressWarnings("unused")
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0)
y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0)
r = 0;
else if (r > 262143)
r = 262143;
if (g < 0)
g = 0;
else if (g > 262143)
g = 262143;
if (b < 0)
b = 0;
else if (b > 262143)
b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
I'm sure you will find the LivePreview test application interesting; it's part of the Android source code in the latest Jelly Bean (MR1). It implements a camera preview and uses ScriptIntrinsicYuvToRGB to convert the preview data with RenderScript. You can browse the source online here:
LivePreview
I was not able to get ScriptIntrinsicYuvToRGB running, so I decided to write my own RS solution.
Here's the finished script (named yuv.rs):
#pragma version(1)
#pragma rs java_package_name(com.package.name)
rs_allocation gIn;
int width;
int height;
int frameSize;
void yuvToRgb(const uchar *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) {
uchar yp = rsGetElementAtYuv_uchar_Y(gIn, x, y) & 0xFF;
int index = frameSize + (x & (~1)) + (( y>>1) * width );
int v = (int)( rsGetElementAt_uchar(gIn, index) & 0xFF ) -128;
int u = (int)( rsGetElementAt_uchar(gIn, index+1) & 0xFF ) -128;
int r = (int) (1.164f * yp + 1.596f * v );
int g = (int) (1.164f * yp - 0.813f * v - 0.391f * u);
int b = (int) (1.164f * yp + 2.018f * u );
r = r>255? 255 : r<0 ? 0 : r;
g = g>255? 255 : g<0 ? 0 : g;
b = b>255? 255 : b<0 ? 0 : b;
uchar4 res4;
res4.r = (uchar)r;
res4.g = (uchar)g;
res4.b = (uchar)b;
res4.a = 0xFF;
*v_out = res4;
}
Don't forget to set camera preview format to NV21:
Parameters cameraParameters = camera.getParameters();
cameraParameters.setPreviewFormat(ImageFormat.NV21);
// Other camera init stuff: preview size, framerate, etc.
camera.setParameters(cameraParameters);
Allocations initialization and script usage:
// Somewhere in initialization section
// w and h are variables for selected camera preview size
rs = RenderScript.create(this);
Type.Builder tbIn = new Type.Builder(rs, Element.U8(rs));
tbIn.setX(w);
tbIn.setY(h);
tbIn.setYuvFormat(ImageFormat.NV21);
Type.Builder tbOut = new Type.Builder(rs, Element.RGBA_8888(rs));
tbOut.setX(w);
tbOut.setY(h);
inData = Allocation.createTyped(rs, tbIn.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT & Allocation.USAGE_SHARED);
outData = Allocation.createTyped(rs, tbOut.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT & Allocation.USAGE_SHARED);
outputBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
yuvScript = new ScriptC_yuv(rs);
yuvScript.set_gIn(inData);
yuvScript.set_width(w);
yuvScript.set_height(h);
yuvScript.set_frameSize(previewSize);
//.....
Camera callback method:
public void onPreviewFrame(byte[] data, Camera camera) {
// In your camera callback, copy the preview data into the input allocation
inData.copyFrom(data);
yuvScript.forEach_yuvToRgb(inData, outData);
outData.copyTo(outputBitmap);
// draw your bitmap where you want to
// .....
}
For anyone who didn't know, RenderScript is now in the Android Support Library, including intrinsics.
http://android-developers.blogspot.com.au/2013/09/renderscript-in-android-support-library.html
http://android-developers.blogspot.com.au/2013/08/renderscript-intrinsics.html
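To illustrate, here is a minimal sketch of the support-library intrinsic (android.support.v8.renderscript) applied to an NV21 preview frame; the nv21 byte array, the frame width/height and outputBitmap (an ARGB_8888 Bitmap of the same size) are assumptions, not taken from the answers above:
RenderScript rs = RenderScript.create(context);
ScriptIntrinsicYuvToRGB yuvToRgb = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));
// input: a flat U8 allocation sized for the NV21 buffer (width * height * 3 / 2 bytes)
Type.Builder yuvType = new Type.Builder(rs, Element.U8(rs)).setX(nv21.length);
Allocation in = Allocation.createTyped(rs, yuvType.create(), Allocation.USAGE_SCRIPT);
// output: an RGBA_8888 allocation with the frame dimensions
Type.Builder rgbaType = new Type.Builder(rs, Element.RGBA_8888(rs)).setX(width).setY(height);
Allocation out = Allocation.createTyped(rs, rgbaType.create(), Allocation.USAGE_SCRIPT);
in.copyFrom(nv21);
yuvToRgb.setInput(in);
yuvToRgb.forEach(out);
out.copyTo(outputBitmap);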
We now have the new renderscript-intrinsics-replacement-toolkit to do it. First, build and import the renderscript module into your project and add it as a dependency to your app module. Then go to Toolkit.kt and add the following:
fun toNv21(image: Image): ByteArray? {
val nv21 = ByteArray((image.width * image.height * 1.5f).toInt())
return if (!nativeYuv420toNv21(
nativeHandle,
image.width,
image.height,
image.planes[0].buffer, // Y buffer
image.planes[1].buffer, // U buffer
image.planes[2].buffer, // V buffer
image.planes[0].pixelStride, // Y pixel stride
image.planes[1].pixelStride, // U/V pixel stride
image.planes[0].rowStride, // Y row stride
image.planes[1].rowStride, // U/V row stride
nv21
)
) {
null
} else nv21
}
private external fun nativeYuv420toNv21(
nativeHandle: Long,
imageWidth: Int,
imageHeight: Int,
yByteBuffer: ByteBuffer,
uByteBuffer: ByteBuffer,
vByteBuffer: ByteBuffer,
yPixelStride: Int,
uvPixelStride: Int,
yRowStride: Int,
uvRowStride: Int,
nv21Output: ByteArray
): Boolean
Now, go to JniEntryPoints.cpp and add the following:
extern "C" JNIEXPORT jboolean JNICALL Java_com_google_android_renderscript_Toolkit_nativeYuv420toNv21(
JNIEnv *env, jobject/*thiz*/, jlong native_handle,
jint image_width, jint image_height, jobject y_byte_buffer,
jobject u_byte_buffer, jobject v_byte_buffer, jint y_pixel_stride,
jint uv_pixel_stride, jint y_row_stride, jint uv_row_stride,
jbyteArray nv21_array) {
auto y_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(y_byte_buffer));
auto u_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(u_byte_buffer));
auto v_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(v_byte_buffer));
jbyte* nv21 = env->GetByteArrayElements(nv21_array, nullptr);
if (nv21 == nullptr || y_buffer == nullptr || u_buffer == nullptr
|| v_buffer == nullptr) {
// Log this.
return false;
}
RenderScriptToolkit* toolkit = reinterpret_cast<RenderScriptToolkit*>(native_handle);
toolkit->yuv420toNv21(image_width, image_height, y_buffer, u_buffer, v_buffer,
y_pixel_stride, uv_pixel_stride, y_row_stride, uv_row_stride,
nv21);
env->ReleaseByteArrayElements(nv21_array, nv21, 0);
return true;
}
Go to YuvToRgb.cpp and add the following:
void RenderScriptToolkit::yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
int uv_pixel_stride, int y_row_stride, int uv_row_stride,
int8_t *nv21) {
// Copy Y channel.
for(int y = 0; y < image_height; ++y) {
int destOffset = image_width * y;
int yOffset = y * y_row_stride;
memcpy(nv21 + destOffset, y_buffer + yOffset, image_width);
}
if (v_buffer - u_buffer == sizeof(int8_t)) {
// format = nv21
// TODO: If the format is VUVUVU & pixel stride == 1 we can simplify the copy
// with memcpy. In Android Camera2 I have mostly come across UVUVUV packaging
// though.
}
// Copy UV Channel.
int idUV = image_width * image_height;
int uv_width = image_width / 2;
int uv_height = image_height / 2;
for(int y = 0; y < uv_height; ++y) {
int uvOffset = y * uv_row_stride;
for (int x = 0; x < uv_width; ++x) {
int bufferIndex = uvOffset + (x * uv_pixel_stride);
// V channel.
nv21[idUV++] = v_buffer[bufferIndex];
// U channel.
nv21[idUV++] = u_buffer[bufferIndex];
}
}
}
Finally, go to RenderscriptToolkit.h and add the following:
/**
 * https://blog.minhazav.dev/how-to-use-renderscript-to-convert-YUV_420_888-yuv-image-to-bitmap/#tobitmapimage-image-method
 * @param image_width width of the image you want to convert to a byte array
 * @param image_height height of the image you want to convert to a byte array
 * @param y_buffer Y buffer
 * @param u_buffer U buffer
 * @param v_buffer V buffer
 * @param y_pixel_stride Y pixel stride
 * @param uv_pixel_stride UV pixel stride
 * @param y_row_stride Y row stride
 * @param uv_row_stride UV row stride
 * @param nv21 the output byte array
 */
void yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
int uv_pixel_stride, int y_row_stride, int uv_row_stride,
int8_t *nv21);
You are now ready to harness the full power of renderscript. Below, I am providing an example with the ARCore Camera Image object (replace the first line with whatever code gives you your camera image):
val cameraImage = arFrame.frame.acquireCameraImage()
val width = cameraImage.width
val height = cameraImage.height
val byteArray = Toolkit.toNv21(cameraImage)
byteArray?.let {
Toolkit.yuvToRgbBitmap(
byteArray,
width,
height,
YuvFormat.NV21
).let { bitmap ->
saveBitmapToDevice(
name,
session,
bitmap,
context
)}}

PNG blending mode

In Photoshop there are a couple of blending modes, such as:
Color Burn
Multiply
Darken
Lighten
Overlay
(more information here: http://help.adobe.com/en_US/photoshop/cs/using/WSfd1234e1c4b69f30ea53e41001031ab64-77eba.html)
For example, the Color Burn mode "looks at the color information in each channel and darkens the base color to reflect the blend color by increasing the contrast between the two. Blending with white produces no change".
This is my code:
Bitmap blendBitmap = BitmapFactory.decodeStream(ctx.getAssets().open(filename));
Canvas canvas = new Canvas(srcBitmap);
canvas.drawBitmap(blendBitmap, 0, 0, null); // ?
p.recycle();
p = null;
Is it possible to apply, for example, the Color Burn blending mode instead of simply drawing one image above the other (as in this small snippet)?
It was not difficult.
I used the NDK (for performance) to manipulate the pixels. This information on blending modes was very useful: How does photoshop blend two images together?
My final solution is:
#define max(a, b) (((a) > (b)) ? (a) : (b)) // helper, in case it is not already defined
#define ChannelBlend_ColorBurn(A, B) ((uint8_t) ((B == 0) ? B : max(0, (255 - ((255 - A) << 8) / B))))
#define ChannelBlend_Alpha(A, B, O) ((uint8_t) (O * A + (1 - O) * B))
#define ChannelBlend_AlphaF(A, B, F, O) (ChannelBlend_Alpha(F(A, B), A, O))
typedef struct {
uint8_t red;
uint8_t green;
uint8_t blue;
uint8_t alpha;
} rgba;
// Blend
JNIEXPORT void
JNICALL Java_com_package_Filter_jniBlend(JNIEnv* env, jobject obj, jobject bitmapA,
jobject bitmapB, jobject bitmapOut, jint mode) {
// Properties
AndroidBitmapInfo infoA;
void* pixelsA;
AndroidBitmapInfo infoB;
void* pixelsB;
AndroidBitmapInfo infoOut;
void* pixelsOut;
int ret;
// Get image info
if ((ret = AndroidBitmap_getInfo(env, bitmapA, &infoA)) < 0 ||
(ret = AndroidBitmap_getInfo(env, bitmapB, &infoB)) < 0 ||
(ret = AndroidBitmap_getInfo(env, bitmapOut, &infoOut)) < 0) {
return;
}
// Check image
if (infoA.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
infoB.format != ANDROID_BITMAP_FORMAT_RGBA_8888 ||
infoOut.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
return;
}
// Lock all images
if ((ret = AndroidBitmap_lockPixels(env, bitmapA, &pixelsA)) < 0 ||
(ret = AndroidBitmap_lockPixels(env, bitmapB, &pixelsB)) < 0 ||
(ret = AndroidBitmap_lockPixels(env, bitmapOut, &pixelsOut)) < 0) {
LOGE("Error! %d", ret);
}
int h = infoA.height;
int w = infoA.width;
int wh = w * h;
int n;
rgba* inputA = (rgba*) pixelsA;
rgba* inputB = (rgba*) pixelsB;
rgba* output = (rgba*) pixelsOut;
rgba pA, pB;
int x, y;
for (y = 0; y < h; y++) {
for (x = 0; x < w; x++) {
n = y * w + x;
pA = inputA[n];
pB = inputB[n];
float alpha = (float) pB.alpha / 255.0;
output[n].red = ChannelBlend_AlphaF(pA.red, pB.red, ChannelBlend_ColorBurn, alpha);
output[n].green = ChannelBlend_AlphaF(pA.green, pB.green, ChannelBlend_ColorBurn, alpha);
output[n].blue = ChannelBlend_AlphaF(pA.blue, pB.blue, ChannelBlend_ColorBurn, alpha);
}
}
// Unlocks everything
AndroidBitmap_unlockPixels(env, bitmapA);
AndroidBitmap_unlockPixels(env, bitmapB);
AndroidBitmap_unlockPixels(env, bitmapOut);
}
A small remark to improve performance: after computing the blend equation for a single pair of channel values, it is a good idea to store the result in some kind of cache so it can be looked up next time instead of recomputing the equation.
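For illustration, a minimal sketch of such a cache as a 256x256 lookup table for the Color Burn mode (the table and init function names are my own, not from the original answer):
// precomputed ColorBurn results for every (base, blend) channel pair
static uint8_t colorBurnLUT[256][256];
static void initColorBurnLUT() {
    for (int a = 0; a < 256; a++) {
        for (int b = 0; b < 256; b++) {
            colorBurnLUT[a][b] = ChannelBlend_ColorBurn(a, b);
        }
    }
}
// in the pixel loop, the macro call can then be replaced by a table lookup, e.g.:
// output[n].red = ChannelBlend_Alpha(colorBurnLUT[pA.red][pB.red], pA.red, alpha);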
