I have to create an HSV histogram from an ARGB array using RenderScript on Android. This is the first time I am using RenderScript and I am not sure whether I made a mistake, because the performance is not great: creating an HSV histogram from a 1920x1080 bitmap takes between 100 and 150 ms.
The RenderScript code:
#pragma version(1)
#pragma rs java_package_name(com.test.renderscript)
#pragma rs_fp_relaxed
uchar3 bins;
rs_allocation histogramAllocation;
void __attribute__((kernel)) process(uchar4 in) {
    float r = in.r / 255.0;
    float g = in.g / 255.0;
    float b = in.b / 255.0;

    // convert rgb to hsv
    float minRGB = min(r, min(g, b));
    float maxRGB = max(r, max(g, b));
    float deltaRGB = maxRGB - minRGB;

    float h = 0.0;
    float s = maxRGB == 0 ? 0 : (maxRGB - minRGB) / maxRGB;
    float v = maxRGB;

    if (deltaRGB != 0) {
        if (r == maxRGB) {
            h = (g - b) / deltaRGB;
        } else if (g == maxRGB) {
            h = 2 + (b - r) / deltaRGB;
        } else {
            h = 4 + (r - g) / deltaRGB;
        }
        h *= 60;
        if (h < 0) { h += 360; }
        if (h == 360) { h = 0; }
    }

    // quantize hsv
    uint qh = h / (360.0 / bins.s0);
    uint qs = (s * 100) / (101.0 / bins.s1);
    uint qv = (v * 100) / (101.0 / bins.s2);

    // calculate bin index and update the count at that index
    // (v * bin size H * bin size S) + (s * bin size H) + h
    uint binIndex = (qv * bins.s0 * bins.s1) + (qs * bins.s0) + qh;
    uint count = rsGetElementAt_uint(histogramAllocation, binIndex);
    rsSetElementAt_uint(histogramAllocation, (count + 1), binIndex);
}

void init() {
    uint histogramSize = bins.s0 * bins.s1 * bins.s2;
    for (int i = 0; i < histogramSize; i++) {
        rsSetElementAt_uint(histogramAllocation, 0, i);
    }
}
The Kotlin code:
class RsCreateHsvHistogram {

    fun execute(rs: RenderScript, src: ByteArray, bins: HsvHistogram.Bins = HsvHistogram.Bins()): HsvHistogram {
        val start = SystemClock.elapsedRealtimeNanos()
        val histogramSize = bins.h * bins.s * bins.v

        // create input allocation
        val typeIn = Type.Builder(rs, Element.U8_4(rs))
            .setX(src.size / 4)
            .create()
        val allocIn = Allocation.createTyped(rs, typeIn)
        allocIn.copyFrom(src)

        // create output allocation -> the histogram allocation
        val typeOut = Type.Builder(rs, Element.I32(rs))
            .setX(histogramSize)
            .create()
        val allocOut = Allocation.createTyped(rs, typeOut)

        // run the render script
        val script = ScriptC_create_hsv_histogram(rs)
        script._bins = Short3(bins.h, bins.s, bins.v)
        script._histogramAllocation = allocOut
        script.forEach_process(allocIn)

        // copy output allocation to histogram array
        val histogramData = IntArray(histogramSize)
        allocOut.copyTo(histogramData)

        val stop = SystemClock.elapsedRealtimeNanos()
        Timber.e("duration => ${(stop - start) / 1000000.0} ms")
        return HsvHistogram(histogramData, bins)
    }
}
I hope you can help me improve the performance. Do you think HSV histogram creation can be done in about 20ms? Is this realistic?
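Two things stand out in the code above, for what it's worth. First, the measured time includes creating the Type, Allocation and ScriptC objects and copying the roughly 8 MB source array on every call, so reusing those objects across frames should already shave off a good chunk. Second, the rsGetElementAt_uint/rsSetElementAt_uint pair in the kernel is an unsynchronized read-modify-write, so parallel invocations can drop counts; rsAtomicInc on a bound int32_t pointer is the usual remedy. A hedged sketch of the reuse idea, written in Java with hypothetical class and field names, and not benchmarked:
import android.renderscript.Allocation;
import android.renderscript.Element;
import android.renderscript.RenderScript;
import android.renderscript.Type;

// Hedged sketch: create the expensive objects once and reuse them every frame.
// Class and field names are hypothetical; the script is the one from the question.
public class HsvHistogramRunner {
    private final ScriptC_create_hsv_histogram script;
    private final Allocation allocIn;
    private final Allocation allocOut;
    private final int[] histogram;

    public HsvHistogramRunner(RenderScript rs, int pixelCount, int histogramSize) {
        script = new ScriptC_create_hsv_histogram(rs);
        allocIn = Allocation.createTyped(rs,
                new Type.Builder(rs, Element.U8_4(rs)).setX(pixelCount).create());
        allocOut = Allocation.createTyped(rs,
                new Type.Builder(rs, Element.I32(rs)).setX(histogramSize).create());
        script.set_histogramAllocation(allocOut);
        // script.set_bins(...) would be called here too, as in the Kotlin code above.
        histogram = new int[histogramSize];
    }

    public int[] run(byte[] argb) {
        allocIn.copyFrom(argb);          // the per-frame copy is now the only allocation traffic
        script.forEach_process(allocIn); // note: the histogram still has to be zeroed between frames
        allocOut.copyTo(histogram);      // copyTo blocks until the kernel has finished
        return histogram;
    }
}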
Related
I trained a model using TensorFlow and then converted it to the TensorFlow Lite format.
Model inference worked absolutely fine on a laptop using Python.
Then I put the model into an Android app and used the TensorFlow Lite interpreter for inference, and the result was nothing but a full black image.
I ported the Python code to Java as-is and am still getting this garbage result.
Any idea where I might be going wrong here?
Python Code:
def preprocess(img):
    return (img / 255. - 0.5) * 2

def deprocess(img):
    return (img + 1) / 2

img_size = 256
frozen_model_filename = os.path.join('model/tflite', 'model.tflite')

image_1 = cv2.resize(imread(image_1), (img_size, img_size))
X_1 = np.expand_dims(preprocess(image_1), 0)
X_1 = X_1.astype(np.float32)

image_2 = cv2.resize(imread(image_2), (img_size, img_size))
X_2 = np.expand_dims(preprocess(image_2), 0)
X_2 = X_2.astype(np.float32)

interpreter = tf.lite.Interpreter(model_path=frozen_model_filename)
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()

interpreter.set_tensor(input_details[0]['index'], X_1)
interpreter.set_tensor(input_details[1]['index'], X_2)
interpreter.invoke()

Output = interpreter.get_tensor(output_details[0]['index'])
Output = deprocess(Output)
imsave('result_tflite.jpg', Output[0])
Corresponding Java code for Android platform:
private ByteBuffer convertBitmapToByteBuffer(Bitmap bitmap) {
    Bitmap resized = Bitmap.createScaledBitmap(bitmap, IMAGE_SIZE, IMAGE_SIZE, false);

    ByteBuffer byteBuffer;
    if (isQuant) {
        byteBuffer = ByteBuffer.allocateDirect(BATCH_SIZE * IMAGE_SIZE * IMAGE_SIZE * PIXEL_SIZE);
    } else {
        byteBuffer = ByteBuffer.allocateDirect(4 * BATCH_SIZE * IMAGE_SIZE * IMAGE_SIZE * PIXEL_SIZE);
    }
    byteBuffer.order(ByteOrder.nativeOrder());

    int[] intValues = new int[IMAGE_SIZE * IMAGE_SIZE];
    resized.getPixels(intValues, 0, resized.getWidth(), 0, 0, resized.getWidth(), resized.getHeight());

    int pixel = 0;
    byteBuffer.rewind();
    for (int i = 0; i < IMAGE_SIZE; ++i) {
        for (int j = 0; j < IMAGE_SIZE; ++j) {
            final int val = intValues[pixel++];
            if (isQuant) {
                byteBuffer.put((byte) ((val >> 16) & 0xFF));
                byteBuffer.put((byte) ((val >> 8) & 0xFF));
                byteBuffer.put((byte) (val & 0xFF));
            } else {
                byteBuffer.putFloat((((val >> 16) & 0xFF) - 0.5f) * 2.0f);
                byteBuffer.putFloat((((val >> 8) & 0xFF) - 0.5f) * 2.0f);
                byteBuffer.putFloat(((val & 0xFF) - 0.5f) * 2.0f);
            }
        }
    }
    return byteBuffer;
}
private Bitmap getOutputImage(ByteBuffer output) {
    output.rewind();

    int outputWidth = IMAGE_SIZE;
    int outputHeight = IMAGE_SIZE;
    Bitmap bitmap = Bitmap.createBitmap(outputWidth, outputHeight, Bitmap.Config.ARGB_8888);
    int[] pixels = new int[outputWidth * outputHeight];

    for (int i = 0; i < outputWidth * outputHeight; i++) {
        int a = 0xFF;
        float r = (output.getFloat() + 1) / 2.0f;
        float g = (output.getFloat() + 1) / 2.0f;
        float b = (output.getFloat() + 1) / 2.0f;
        pixels[i] = a << 24 | ((int) r << 16) | ((int) g << 8) | (int) b;
    }

    bitmap.setPixels(pixels, 0, outputWidth, 0, 0, outputWidth, outputHeight);
    return bitmap;
}
private void runInference() {
    ByteBuffer byteBufferX1 = convertBitmapToByteBuffer(bitmap_x1);
    ByteBuffer byteBufferX2 = convertBitmapToByteBuffer(bitmap_x2);
    Object[] inputs = {byteBufferX1, byteBufferX2};

    ByteBuffer byteBufferOutput;
    if (isQuant) {
        byteBufferOutput = ByteBuffer.allocateDirect(BATCH_SIZE * IMAGE_SIZE * IMAGE_SIZE * PIXEL_SIZE);
    } else {
        byteBufferOutput = ByteBuffer.allocateDirect(4 * BATCH_SIZE * IMAGE_SIZE * IMAGE_SIZE * PIXEL_SIZE);
    }
    byteBufferOutput.order(ByteOrder.nativeOrder());
    byteBufferOutput.rewind();

    Map<Integer, Object> outputs = new HashMap<>();
    outputs.put(0, byteBufferOutput);

    interpreter.runForMultipleInputsOutputs(inputs, outputs);

    ByteBuffer out = (ByteBuffer) outputs.get(0);
    Bitmap outputBitmap = getOutputImage(out);
    // outputBitmap is just a full black image
}
Both the Java and Python interpreters are based on the same C++ implementation, so the results should be the same. The error should be in your Java code.
Here I think you forgot to divide by 255 on the way in and to multiply by 255 on the way out.
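A hedged sketch of what the corrected conversions could look like, mirroring the Python preprocess ((img / 255 - 0.5) * 2) and deprocess ((img + 1) / 2) exactly; the helper names are hypothetical and only illustrate where the question's loops would change:
// Hypothetical helpers illustrating the fix; not part of the original code.

// preprocess: (channel / 255 - 0.5) * 2  -> float in [-1, 1]
static float preprocessChannel(int channel) {
    return (channel / 255.0f - 0.5f) * 2.0f;
}

// deprocess: (value + 1) / 2 -> [0, 1], then scale back to 0..255
static int deprocessChannel(float value) {
    int c = (int) ((value + 1.0f) / 2.0f * 255.0f);
    return Math.max(0, Math.min(255, c));
}

// In convertBitmapToByteBuffer(), the non-quantized branch would become:
//     byteBuffer.putFloat(preprocessChannel((val >> 16) & 0xFF));
//     byteBuffer.putFloat(preprocessChannel((val >> 8) & 0xFF));
//     byteBuffer.putFloat(preprocessChannel(val & 0xFF));

// In getOutputImage(), each pixel would become:
//     int r = deprocessChannel(output.getFloat());
//     int g = deprocessChannel(output.getFloat());
//     int b = deprocessChannel(output.getFloat());
//     pixels[i] = 0xFF << 24 | (r << 16) | (g << 8) | b;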
I'm trying to render a video frame using the Android NDK.
I'm using Google's native-codec NDK sample code and modified it so I can manually display each video frame (non-tunneled).
So I added this code to get the output buffer, which is in YUV.
ANativeWindow_setBuffersGeometry(mWindow, bufferWidth, bufferHeight,
                                 WINDOW_FORMAT_RGBA_8888);
uint8_t *decodedBuff = AMediaCodec_getOutputBuffer(d->codec, status, &bufSize);
auto format = AMediaCodec_getOutputFormat(d->codec);
LOGV("VOUT: format %s", AMediaFormat_toString(format));
AMediaFormat *myFormat = format;
int32_t w,h;
AMediaFormat_getInt32(myFormat, AMEDIAFORMAT_KEY_HEIGHT, &h);
AMediaFormat_getInt32(myFormat, AMEDIAFORMAT_KEY_WIDTH, &w);
err = ANativeWindow_lock(mWindow, &buffer, nullptr);
and this code to convert the YUV to RGB and display it using the native window.
if (err == 0) {
    LOGV("ANativeWindow_lock()");

    int width = w;
    int height = h;
    int const frameSize = width * height;

    int *line = reinterpret_cast<int *>(buffer.bits);
    for (int y = 0; y < height; y++) {
        for (int x = 0; x < width; x++) {
            /* accessing YUV420SP elements */
            int indexY = y * width + x;
            int indexU = frameSize + (y / 2) * width + (x / 2) * 2;
            int indexV = frameSize + (y / 2) * width + (x / 2) * 2 + 1;

            /* todo: this conversion to int and then later back to int really isn't required.
               There's room for better work here. */
            int Y = 0xFF & decodedBuff[indexY];
            int U = 0xFF & decodedBuff[indexU];
            int V = 0xFF & decodedBuff[indexV];

            /* constants picked up from http://www.fourcc.org/fccyvrgb.php */
            int R = (int) (Y + 1.402f * (V - 128));
            int G = (int) (Y - 0.344f * (U - 128) - 0.714f * (V - 128));
            int B = (int) (Y + 1.772f * (U - 128));

            /* clamping values */
            R = R < 0 ? 0 : R > 255 ? 255 : R;
            G = G < 0 ? 0 : G > 255 ? 255 : G;
            B = B < 0 ? 0 : B > 255 ? 255 : B;

            line[buffer.stride * y + x] = 0xff000000 + (B << 16) + (G << 8) + R;
        }
    }
    ANativeWindow_unlockAndPost(mWindow);
}
Finally I was able to display a video on my device. Now my problem is that the video does not scale to fit the surface view :(
Your thoughts are very much appreciated.
I have to provide a YUV (NV21) byte array to a recognition solution, and to reduce processing time I'd like to downscale the preview frame.
From solutions gathered here and there on SO, I managed to convert at a 1:1 ratio and I get recognition hits. But if I scale the intermediate bitmap down, I get no result, even if I scale it down to only 95%.
Any help would be appreciated.
Every 400 ms or so I take the preview frame and convert it asynchronously: I convert it to ARGB using RenderScript, scale it down, and then convert it back.
// Camera callback
@Override
public void onPreviewFrame(byte[] frame, Camera camera) {
    if (camera != null) {
        // Debounce
        if ((System.currentTimeMillis() - mStart) > 400) {
            mStart = System.currentTimeMillis();
            Camera.Size size = camera.getParameters().getPreviewSize();
            new FrameScaleAsyncTask(frame, size.width, size.height).execute();
        }
    }
    if (mCamera != null) {
        mCamera.addCallbackBuffer(mBuffer);
    }
}
// In FrameScaleAsyncTask
@Override
protected Void doInBackground(Void... params) {
    // Create YUV type for in-allocation
    Type yuvType = new Type.Builder(mRenderScript, Element.U8(mRenderScript))
            .setX(mFrame.length)
            .create();
    mAllocationIn = Allocation.createTyped(mRenderScript, yuvType, Allocation.USAGE_SCRIPT);

    // Create ARGB-8888 type for out-allocation
    Type rgbType = new Type.Builder(mRenderScript, Element.RGBA_8888(mRenderScript))
            .setX(mWidth)
            .setY(mHeight)
            .create();
    mAllocationOut = Allocation.createTyped(mRenderScript, rgbType, Allocation.USAGE_SCRIPT);

    // Copy frame data into in-allocation
    mAllocationIn.copyFrom(mFrame);

    // Set script input and fire!
    mScript.setInput(mAllocationIn);
    mScript.forEach(mAllocationOut);

    // Create a bitmap of camera preview size (see camera setup) and copy out-allocation to it
    Bitmap bitmap = Bitmap.createBitmap(mWidth, mHeight, Bitmap.Config.ARGB_8888);
    mAllocationOut.copyTo(bitmap);

    // Scale bitmap down
    double scaleRatio = 1;
    Bitmap scaledBitmap = Bitmap.createScaledBitmap(
            bitmap,
            (int) (bitmap.getWidth() * scaleRatio),
            (int) (bitmap.getHeight() * scaleRatio),
            false
    );
    bitmap.recycle();

    int size = scaledBitmap.getRowBytes() * scaledBitmap.getHeight();
    int scaledWidth = scaledBitmap.getWidth();
    int scaledHeight = scaledBitmap.getHeight();
    int[] pixels = new int[scaledWidth * scaledHeight];

    // Put bitmap pixels into an int array
    scaledBitmap.getPixels(pixels, 0, scaledWidth, 0, 0, scaledWidth, scaledHeight);

    mFrame = new byte[pixels.length * 3 / 2];
    ImageHelper.encodeYUV420SPAlt(mFrame, pixels, scaledWidth, scaledHeight);

    return null;
}
The RGB to YUV algorithm (see this answer):
public static void encodeYUV420SPAlt(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;

    int yIndex = 0;
    int uvIndex = frameSize;

    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;

            // well known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

            // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
            // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
            // pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
I finally ended up resizing my image (as an OpenCV Mat) directly in C++. This was way easier and faster.
Size size(correctedWidth, correctedHeight);
Mat dst;
resize(image, dst, size);
I am trying to create a wallpaper and am using the HSV conversion in the android.graphics.Color class. I was very surprised when I realized that converting a created HSV color with a specified hue (0..360) to an RGB color (an integer) and converting it back to an HSV color does not result in the same hue. This is my code:
int c = Color.HSVToColor(new float[] { 100f, 1, 1 });
float[] f = new float[3];
Color.colorToHSV(c, f);
alert(f[0]);
I am starting with a hue of 100 degrees and the result is 99.76471.
I wonder why there is that (in my opinion) relatively big inaccuracy.
But a much bigger problem is that when you put that value into the code again, the new result decreases again.
int c = Color.HSVToColor(new float[] { 99.76471f, 1, 1 });
float[] f = new float[3];
Color.colorToHSV(c, f);
alert(f[0]);
If I start with 99.76471, I get 99.52941. This is kind of a problem for me.
I did something similar in Java with the java.awt.Color class, where I did not have those problems. Unfortunately, I cannot use that class on Android.
This is an interesting problem. It's not avoidable with the Android class because of low float precision. However, I found a similar solution written in JavaScript here.
If it's important enough for you to want to define your own method/class to do the conversions, here is a Java conversion which should give you better precision:
/** Does the same as {@link android.graphics.Color#colorToHSV(int, float[])}. */
@Size(3)
public double[] colorToHSV(@ColorInt int color) {
    // this line copied verbatim
    return rgbToHsv((color >> 16) & 0xFF, (color >> 8) & 0xFF, color & 0xFF);
}
@Size(3)
public double[] rgbToHsv(double r, double g, double b) {
    final double max = Math.max(r, Math.max(g, b));
    final double min = Math.min(r, Math.min(g, b));
    final double diff = max - min;

    final double h;
    final double s = ((max == 0d) ? 0d : diff / max);
    final double v = max / 255d;

    if (min == max) {
        h = 0d;
    } else if (r == max) {
        double tempH = (g - b) + diff * (g < b ? 6 : 0);
        tempH /= 6 * diff;
        h = tempH;
    } else if (g == max) {
        double tempH = (b - r) + diff * 2;
        tempH /= 6 * diff;
        h = tempH;
    } else {
        double tempH = (r - g) + diff * 4;
        tempH /= 6 * diff;
        h = tempH;
    }

    return new double[] { h, s, v };
}
I have to confess ignorance here - I've done a quick conversion and not had time to test it properly. There might be a more optimal solution, but this should get you started at least.
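For what it's worth, a small hedged usage sketch. Note that, unlike android.graphics.Color, this rgbToHsv returns the hue normalized to the 0..1 range rather than in degrees, so scale by 360 if you want to compare the round trip:
// Hypothetical usage; assumes the two methods above are in scope (e.g. in the same class).
int color = Color.HSVToColor(new float[] { 100f, 1f, 1f });
double[] hsv = colorToHSV(color);
double hueDegrees = hsv[0] * 360d;  // this implementation returns hue in [0, 1), not degrees
Log.d("HSV", "hue=" + hueDegrees + ", s=" + hsv[1] + ", v=" + hsv[2]);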
Don't miss the mirrored procedure from the source link. Below is the translation to Kotlin.
fun hsvToRGB(hsv: DoubleArray): Int {
    val i = floor(hsv[0] * 6).toInt()
    val f = hsv[0] * 6 - i
    val p = hsv[2] * (1 - hsv[1])
    val q = hsv[2] * (1 - f * hsv[1])
    val t = hsv[2] * (1 - (1 - f) * hsv[1])
    val r: Double
    val g: Double
    val b: Double
    when (i % 6) {
        0 -> { r = hsv[2]; g = t; b = p }
        1 -> { r = q; g = hsv[2]; b = p }
        2 -> { r = p; g = hsv[2]; b = t }
        3 -> { r = p; g = q; b = hsv[2] }
        4 -> { r = t; g = p; b = hsv[2] }
        5 -> { r = hsv[2]; g = p; b = q }
        else -> { r = 0.0; g = 0.0; b = 0.0 }
    }
    return Color.rgb((r * 255).roundToInt(), (g * 255).roundToInt(), (b * 255).roundToInt())
}

fun rgbToHSV(color: Int, target: DoubleArray) {
    val r = Color.red(color).toDouble()
    val g = Color.green(color).toDouble()
    val b = Color.blue(color).toDouble()
    val max = kotlin.math.max(r, kotlin.math.max(g, b))
    val min = kotlin.math.min(r, kotlin.math.min(g, b))
    val diff = max - min
    target[1] = if (max == 0.0) 0.0 else diff / max
    target[2] = max / 255.0
    target[0] = if (min == max) {
        0.0
    } else if (r == max) {
        var tempH = (g - b) + diff * if (g < b) 6 else 0
        tempH /= 6 * diff
        tempH
    } else if (g == max) {
        var tempH = (b - r) + diff * 2
        tempH /= 6 * diff
        tempH
    } else {
        var tempH = (r - g) + diff * 4
        tempH /= 6 * diff
        tempH
    }
}
My problem is: I've set up a camera in Android and receive the preview data via an onPreviewFrame listener, which passes me a byte[] array containing the image data in the default Android YUV format (the device does not support the R5G6B5 format). Each pixel consists of 12 bits, which makes things a little tricky. Now what I want to do is convert the YUV data into ARGB data in order to do image processing with it. This has to be done with RenderScript in order to maintain high performance.
My idea was to pass two pixels in one element (which would be 24 bits = 3 bytes) and then return two ARGB pixels. The problem is that in RenderScript a u8_3 (a 3-component 8-bit vector) is stored in 32 bits, which means that the last 8 bits are unused. But when copying the image data into the allocation, all 32 bits are used, so the last 8 bits get lost. Even if I used 32-bit input data, the last 8 bits would be useless, because they're only 2/3 of a pixel. When defining an element consisting of a 3-byte array, it actually has a real size of 3 bytes, but then the Allocation.copyFrom() method doesn't fill the in-allocation with data, complaining that it doesn't have the right data type to be filled with a byte[].
The RenderScript documentation states that there is a ScriptIntrinsicYuvToRGB which should do exactly that in API level 17, but in fact the class doesn't exist. I've downloaded API level 17 even though it seems not to be downloadable any more. Does anyone have any information about it? Has anyone ever tried out a ScriptIntrinsic?
So in conclusion my question is: how do I convert the camera data into ARGB data quickly and hardware-accelerated?
Here's how to do it in the Dalvik VM (I found the code somewhere online; it works):
@SuppressWarnings("unused")
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;

    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0)
                y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }

            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);

            if (r < 0)
                r = 0;
            else if (r > 262143)
                r = 262143;
            if (g < 0)
                g = 0;
            else if (g > 262143)
                g = 262143;
            if (b < 0)
                b = 0;
            else if (b > 262143)
                b = 262143;

            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
}
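A minimal hedged usage sketch around the method above; `data` is assumed to be the NV21 byte[] delivered to onPreviewFrame and previewWidth/previewHeight the configured preview size (these names are hypothetical):
// Hypothetical glue code around decodeYUV420SP(); not from the original post.
int[] rgb = new int[previewWidth * previewHeight];
decodeYUV420SP(rgb, data, previewWidth, previewHeight);

// Bitmap.createBitmap(int[], width, height, config) copies the ARGB pixels for us.
Bitmap frame = Bitmap.createBitmap(rgb, previewWidth, previewHeight, Bitmap.Config.ARGB_8888);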
I'm sure you will find the LivePreview test application interesting ... it's part of the Android source code in the latest Jelly Bean (MR1). It implements a camera preview and uses ScriptIntrinsicYuvToRGB to convert the preview data with RenderScript. You can browse the source online here:
LivePreview
I was not able to get ScriptIntrinsicYuvToRGB running, so I decided to write my own RS solution.
Here's the ready script (named yuv.rs):
#pragma version(1)
#pragma rs java_package_name(com.package.name)
rs_allocation gIn;
int width;
int height;
int frameSize;
void yuvToRgb(const uchar *v_in, uchar4 *v_out, const void *usrData, uint32_t x, uint32_t y) {
    uchar yp = rsGetElementAtYuv_uchar_Y(gIn, x, y) & 0xFF;

    int index = frameSize + (x & (~1)) + ((y >> 1) * width);
    int v = (int) (rsGetElementAt_uchar(gIn, index) & 0xFF) - 128;
    int u = (int) (rsGetElementAt_uchar(gIn, index + 1) & 0xFF) - 128;

    int r = (int) (1.164f * yp + 1.596f * v);
    int g = (int) (1.164f * yp - 0.813f * v - 0.391f * u);
    int b = (int) (1.164f * yp + 2.018f * u);

    r = r > 255 ? 255 : r < 0 ? 0 : r;
    g = g > 255 ? 255 : g < 0 ? 0 : g;
    b = b > 255 ? 255 : b < 0 ? 0 : b;

    uchar4 res4;
    res4.r = (uchar) r;
    res4.g = (uchar) g;
    res4.b = (uchar) b;
    res4.a = 0xFF;

    *v_out = res4;
}
Don't forget to set the camera preview format to NV21:
Parameters cameraParameters = camera.getParameters();
cameraParameters.setPreviewFormat(ImageFormat.NV21);
// Other camera init stuff: preview size, framerate, etc.
camera.setParameters(cameraParameters);
Allocation initialization and script usage:
// Somewhere in initialization section
// w and h are variables for selected camera preview size
rs = RenderScript.create(this);
Type.Builder tbIn = new Type.Builder(rs, Element.U8(rs));
tbIn.setX(w);
tbIn.setY(h);
tbIn.setYuvFormat(ImageFormat.NV21);
Type.Builder tbOut = new Type.Builder(rs, Element.RGBA_8888(rs));
tbOut.setX(w);
tbOut.setY(h);
inData = Allocation.createTyped(rs, tbIn.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT | Allocation.USAGE_SHARED);
outData = Allocation.createTyped(rs, tbOut.create(), Allocation.MipmapControl.MIPMAP_NONE, Allocation.USAGE_SCRIPT | Allocation.USAGE_SHARED);
outputBitmap = Bitmap.createBitmap(w, h, Bitmap.Config.ARGB_8888);
yuvScript = new ScriptC_yuv(rs);
yuvScript.set_gIn(inData);
yuvScript.set_width(w);
yuvScript.set_height(h);
yuvScript.set_frameSize(previewSize);
//.....
Camera callback method:
public void onPreviewFrame(byte[] data, Camera camera) {
    // In your camera callback: data is the NV21 preview frame
    inData.copyFrom(data);
    yuvScript.forEach_yuvToRgb(inData, outData);
    outData.copyTo(outputBitmap);
    // draw your bitmap where you want to
    // .....
}
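The drawing itself is left open above; one simple hedged option, assuming you render into a SurfaceView and hold its SurfaceHolder in a field named holder (not part of the original answer):
// Hypothetical drawing code; 'holder' is assumed to be a valid SurfaceHolder.
Canvas canvas = holder.lockCanvas();
if (canvas != null) {
    canvas.drawBitmap(outputBitmap, 0, 0, null);
    holder.unlockCanvasAndPost(canvas);
}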
For anyone who didn't know, RenderScript is now in the Android Support Library, including intrinsics.
http://android-developers.blogspot.com.au/2013/09/renderscript-in-android-support-library.html
http://android-developers.blogspot.com.au/2013/08/renderscript-intrinsics.html
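With the support library in place, the conversion itself can be done without a custom script. A hedged sketch, assuming nv21 is the NV21 byte[] and width/height are the preview dimensions (names are mine, not from the posts above):
import android.graphics.Bitmap;
import android.support.v8.renderscript.*;

// Hedged sketch: convert an NV21 frame to an ARGB bitmap with the built-in intrinsic.
Bitmap nv21ToBitmap(RenderScript rs, byte[] nv21, int width, int height) {
    ScriptIntrinsicYuvToRGB yuvToRgb = ScriptIntrinsicYuvToRGB.create(rs, Element.U8_4(rs));

    Allocation in = Allocation.createTyped(rs,
            new Type.Builder(rs, Element.U8(rs)).setX(nv21.length).create());
    Allocation out = Allocation.createTyped(rs,
            new Type.Builder(rs, Element.RGBA_8888(rs)).setX(width).setY(height).create());

    in.copyFrom(nv21);
    yuvToRgb.setInput(in);
    yuvToRgb.forEach(out);

    Bitmap bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    out.copyTo(bitmap);
    return bitmap;
}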
We now have the new renderscript-intrinsics-replacement-toolkit to do it. First, build and import the renderscript module into your project and add it as a dependency to your app module. Then, go to Toolkit.kt and add the following:
fun toNv21(image: Image): ByteArray? {
    val nv21 = ByteArray((image.width * image.height * 1.5f).toInt())
    return if (!nativeYuv420toNv21(
            nativeHandle,
            image.width,
            image.height,
            image.planes[0].buffer,       // Y buffer
            image.planes[1].buffer,       // U buffer
            image.planes[2].buffer,       // V buffer
            image.planes[0].pixelStride,  // Y pixel stride
            image.planes[1].pixelStride,  // U/V pixel stride
            image.planes[0].rowStride,    // Y row stride
            image.planes[1].rowStride,    // U/V row stride
            nv21
        )
    ) {
        null
    } else nv21
}

private external fun nativeYuv420toNv21(
    nativeHandle: Long,
    imageWidth: Int,
    imageHeight: Int,
    yByteBuffer: ByteBuffer,
    uByteBuffer: ByteBuffer,
    vByteBuffer: ByteBuffer,
    yPixelStride: Int,
    uvPixelStride: Int,
    yRowStride: Int,
    uvRowStride: Int,
    nv21Output: ByteArray
): Boolean
Now, go to JniEntryPoints.cpp and add the following:
extern "C" JNIEXPORT jboolean JNICALL Java_com_google_android_renderscript_Toolkit_nativeYuv420toNv21(
JNIEnv *env, jobject/*thiz*/, jlong native_handle,
jint image_width, jint image_height, jobject y_byte_buffer,
jobject u_byte_buffer, jobject v_byte_buffer, jint y_pixel_stride,
jint uv_pixel_stride, jint y_row_stride, jint uv_row_stride,
jbyteArray nv21_array) {
auto y_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(y_byte_buffer));
auto u_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(u_byte_buffer));
auto v_buffer = static_cast<jbyte*>(env->GetDirectBufferAddress(v_byte_buffer));
jbyte* nv21 = env->GetByteArrayElements(nv21_array, nullptr);
if (nv21 == nullptr || y_buffer == nullptr || u_buffer == nullptr
|| v_buffer == nullptr) {
// Log this.
return false;
}
RenderScriptToolkit* toolkit = reinterpret_cast<RenderScriptToolkit*>(native_handle);
toolkit->yuv420toNv21(image_width, image_height, y_buffer, u_buffer, v_buffer,
y_pixel_stride, uv_pixel_stride, y_row_stride, uv_row_stride,
nv21);
env->ReleaseByteArrayElements(nv21_array, nv21, 0);
return true;
}
Go to YuvToRgb.cpp and add the following:
void RenderScriptToolkit::yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
                                       const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
                                       int uv_pixel_stride, int y_row_stride, int uv_row_stride,
                                       int8_t *nv21) {
    // Copy Y channel.
    for (int y = 0; y < image_height; ++y) {
        int destOffset = image_width * y;
        int yOffset = y * y_row_stride;
        memcpy(nv21 + destOffset, y_buffer + yOffset, image_width);
    }

    if (v_buffer - u_buffer == sizeof(int8_t)) {
        // format = nv21
        // TODO: If the format is VUVUVU & pixel stride == 1 we can simplify the copy
        // with memcpy. In Android Camera2 I have mostly come across UVUVUV packaging
        // though.
    }

    // Copy UV channel.
    int idUV = image_width * image_height;
    int uv_width = image_width / 2;
    int uv_height = image_height / 2;
    for (int y = 0; y < uv_height; ++y) {
        int uvOffset = y * uv_row_stride;
        for (int x = 0; x < uv_width; ++x) {
            int bufferIndex = uvOffset + (x * uv_pixel_stride);
            // V channel.
            nv21[idUV++] = v_buffer[bufferIndex];
            // U channel.
            nv21[idUV++] = u_buffer[bufferIndex];
        }
    }
}
Finally, go to RenderscriptToolkit.h and add the following:
/**
 * https://blog.minhazav.dev/how-to-use-renderscript-to-convert-YUV_420_888-yuv-image-to-bitmap/#tobitmapimage-image-method
 * @param image_width width of the image you want to convert to byte array
 * @param image_height height of the image you want to convert to byte array
 * @param y_buffer Y buffer
 * @param u_buffer U buffer
 * @param v_buffer V buffer
 * @param y_pixel_stride Y pixel stride
 * @param uv_pixel_stride UV pixel stride
 * @param y_row_stride Y row stride
 * @param uv_row_stride UV row stride
 * @param nv21 the output byte array
 */
void yuv420toNv21(int image_width, int image_height, const int8_t* y_buffer,
                  const int8_t* u_buffer, const int8_t* v_buffer, int y_pixel_stride,
                  int uv_pixel_stride, int y_row_stride, int uv_row_stride,
                  int8_t *nv21);
You are now ready to harness the full power of the toolkit. Below is an example with the ARCore camera Image object (replace the first line with whatever code gives you your camera image):
val cameraImage = arFrame.frame.acquireCameraImage()
val width = cameraImage.width
val height = cameraImage.height

val byteArray = Toolkit.toNv21(cameraImage)
byteArray?.let {
    Toolkit.yuvToRgbBitmap(
        byteArray,
        width,
        height,
        YuvFormat.NV21
    ).let { bitmap ->
        saveBitmapToDevice(
            name,
            session,
            bitmap,
            context
        )
    }
}