How to convert RGB565 to YUV420SP faster on Android?

I need to display a JPEG picture and convert it to YUV420SP. First I use SkBitmap to parse the JPEG and display it, then I use the code below to convert RGB565 to YUV420SP on Android, but it takes 75 ms to convert a 640*480 RGB565 picture. Does anybody know a faster way to convert RGB565 to YUV420SP on Android, or a faster way to convert a JPEG file to YUV420SP on Android?
#include <stdio.h>
#include <stdint.h>
#include <sys/time.h>

// Convert from RGB to YUV420
int RGB2YUV_YR[256], RGB2YUV_YG[256], RGB2YUV_YB[256];
int RGB2YUV_UR[256], RGB2YUV_UG[256], RGB2YUV_UBVR[256];
int RGB2YUV_VG[256], RGB2YUV_VB[256];
//
// Tables used for RGB to YUV420 conversion
// (BT.601 studio-swing coefficients, e.g. Y = 16 + 65.481*R' + ...,
// pre-scaled by 2^8 here and divided back out by the >>16 below)
//
void InitLookupTable()
{
    static bool hasInited = false;
    if (hasInited)
        return;
    hasInited = true;
    int i;
    for (i = 0; i < 256; i++)
        RGB2YUV_YR[i] = (float) 65.481 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_YG[i] = (float) 128.553 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_YB[i] = (float) 24.966 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UR[i] = (float) 37.797 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UG[i] = (float) 74.203 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_VG[i] = (float) 93.786 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_VB[i] = (float) 18.214 * (i << 8);
    for (i = 0; i < 256; i++)
        RGB2YUV_UBVR[i] = (float) 112 * (i << 8);
}
int ConvertRGB5652YUV420SP(int w, int h, unsigned char *bmp, unsigned char *yuv)
{
    unsigned char *u, *v, *y, *uu, *vv;
    unsigned char *pu1, *pu2, *pu3, *pu4;
    unsigned char *pv1, *pv2, *pv3, *pv4;
    unsigned char rValue = 0, gValue = 0, bValue = 0;
    uint16_t *bmpPtr;
    int i, j;
    printf("ConvertRGB5652YUV420SP begin,w=%d,h=%d,bmp=%p,yuv=%p\n", w, h, bmp, yuv);
    struct timeval tpstart, tpend;
    gettimeofday(&tpstart, NULL);
    InitLookupTable();
    gettimeofday(&tpend, NULL);
    float timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("InitLookupTable used time=%f\n", timeuse);
    gettimeofday(&tpstart, NULL);
    uu = new unsigned char[w * h];
    vv = new unsigned char[w * h];
    if (uu == NULL || vv == NULL || yuv == NULL)
        return 0;
    y = yuv;
    u = uu;
    v = vv;
    // Walk the RGB565 bmp image data...
    bmpPtr = (uint16_t *)bmp;
    // Compute full-resolution Y, U and V planes...
    for (i = 0; i < h; i++) {
        for (j = 0; j < w; j++) {
            uint16_t color = *bmpPtr;
            unsigned int r = (color >> 11) & 0x1f;
            unsigned int g = (color >> 5) & 0x3f;
            unsigned int b = (color) & 0x1f;
            // expand 5/6-bit channels to 8 bits
            rValue = (r << 3) | (r >> 2);
            gValue = (g << 2) | (g >> 4);
            bValue = (b << 3) | (b >> 2);
            *y++ = (RGB2YUV_YR[rValue] + RGB2YUV_YG[gValue] + RGB2YUV_YB[bValue] +
                    1048576) >> 16;
            *u++ = (-RGB2YUV_UR[rValue] - RGB2YUV_UG[gValue] + RGB2YUV_UBVR[bValue] +
                    8388608) >> 16;
            *v++ = (RGB2YUV_UBVR[rValue] - RGB2YUV_VG[gValue] - RGB2YUV_VB[bValue] +
                    8388608) >> 16;
            bmpPtr++;
        }
    }
    gettimeofday(&tpend, NULL);
    timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("Get YUV values used time=%f\n", timeuse);
    gettimeofday(&tpstart, NULL);
    // Now subsample U & V (average of each 2x2 block) to obtain YUV 4:2:0
    // Get the right pointers...
    u = yuv + w * h;
    v = u + 1;
    // For U
    pu1 = uu;
    pu2 = pu1 + 1;
    pu3 = pu1 + w;
    pu4 = pu3 + 1;
    // For V
    pv1 = vv;
    pv2 = pv1 + 1;
    pv3 = pv1 + w;
    pv4 = pv3 + 1;
    // Do sampling....
    for (i = 0; i < h; i += 2) {
        for (j = 0; j < w; j += 2) {
            *u = (*pu1 + *pu2 + *pu3 + *pu4) >> 2;
            u += 2;
            *v = (*pv1 + *pv2 + *pv3 + *pv4) >> 2;
            v += 2;
            pu1 += 2; pu2 += 2; pu3 += 2; pu4 += 2;
            pv1 += 2; pv2 += 2; pv3 += 2; pv4 += 2;
        }
        // advance past the second row of the 2x2 blocks
        pu1 += w; pu2 += w; pu3 += w; pu4 += w;
        pv1 += w; pv2 += w; pv3 += w; pv4 += w;
    }
    gettimeofday(&tpend, NULL);
    timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("Do sampling used time=%f\n", timeuse);
    delete[] uu;  // array form is required for memory allocated with new[]
    delete[] vv;
    return 1;
}
int main(int argc, char **argv)
{
    unsigned char bmp[640 * 480 * 2] = {0};
    unsigned char yuv[(640 * 480 * 3) / 2] = {0};
    struct timeval tpstart, tpend;
    gettimeofday(&tpstart, NULL);
    ConvertRGB5652YUV420SP(640, 480, bmp, yuv);
    gettimeofday(&tpend, NULL);
    float timeuse = 1000000 * (tpend.tv_sec - tpstart.tv_sec) + tpend.tv_usec - tpstart.tv_usec;
    timeuse /= 1000;
    printf("ConvertARGB2YUV420SP used time=%f\n", timeuse);
    return 0;
}
Output on Android (ARMv6):
ConvertRGB5652YUV420SP begin,w=640,h=480,bmp=0xbe7314fc,yuv=0xbe7c74fc
InitLookupTable used time=0.383000
Get YUV values used time=61.394001
Do sampling used time=11.918000
ConvertARGB2YUV420SP used time=74.596001
cpu info:
$ cat /proc/cpuinfo
Processor : ARMv6-compatible processor rev 5 (v6l)
BogoMIPS : 791.34
Features : swp half thumb fastmult vfp edsp java
CPU implementer : 0x41
CPU architecture: 6TEJ
CPU variant : 0x1
CPU part : 0xb36
CPU revision : 5
Hardware : IMAPX200
Revision : 0000
Serial : 0000000000000000

On ARMv7, use NEON. It will do the job in less than 1 ms for VGA.
If you are stuck with ARMv6, optimize it in ARM assembly (about 8 ms for VGA).
Use fixed-point arithmetic instead of the lookup tables; get rid of them.
Make two masks:
0x001f001f : mask1
0x003f003f : mask2
Then load two pixels at once into a 32-bit register (which is a lot faster than a 16-bit read):
and red, mask1, pixel, lsr #11
and grn, mask2, pixel, lsr #5
and blu, mask1, pixel
Now you have three registers, each containing two values - one in the lower and the other in the upper 16 bits.
The smulxy instructions (16-bit multiplies) will do some miracles from here on.
Good luck.
PS: your lookup tables aren't that good either. Why are they all 256 entries long?
You could reduce them to 32 (the r- and b-related ones) and 64 (the g-related ones), which will increase the cache hit rate.
That alone will probably get you to the targeted 40 ms without resorting to assembly.
Yes, cache misses are THAT painful.
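To make the fixed-point idea concrete, here is a minimal scalar C++ sketch (no tables, no assembly). The 8-bit coefficients (77/150/29 and friends) are the usual JPEG-style weights, assumed here because the answer above does not spell them out, and chroma is point-sampled from the top-left pixel of each 2x2 block instead of averaged:
#include <stdint.h>

// Fixed-point RGB565 -> YUV420SP without lookup tables, in one pass:
// chroma is produced once per 2x2 block, so the separate subsampling
// pass (and the two temporary full-size U/V planes) disappear entirely.
static void rgb565_to_yuv420sp_fixed(int w, int h,
                                     const uint16_t *src, uint8_t *dst)
{
    uint8_t *y  = dst;
    uint8_t *uv = dst + w * h; // interleaved U,V pairs
    for (int i = 0; i < h; i++) {
        for (int j = 0; j < w; j++) {
            uint16_t px = src[i * w + j];
            int r = (px >> 11) & 0x1f;
            int g = (px >> 5) & 0x3f;
            int b = px & 0x1f;
            r = (r << 3) | (r >> 2); // expand 5/6-bit channels to 8 bits
            g = (g << 2) | (g >> 4);
            b = (b << 3) | (b >> 2);
            *y++ = (uint8_t)((77 * r + 150 * g + 29 * b) >> 8);
            if (((i | j) & 1) == 0) { // one U,V pair per 2x2 block
                *uv++ = (uint8_t)(((-43 * r - 85 * g + 128 * b) >> 8) + 128);
                *uv++ = (uint8_t)(((128 * r - 107 * g - 21 * b) >> 8) + 128);
            }
        }
    }
}
On top of this, the two-pixels-per-load masking trick described above maps naturally onto the smulbb/smulbt family once the loop is moved into assembly.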

I have found a faster way using Skia; it runs in about 40 ms.
#include "SkColorPriv.h"
#include "SkBitmap.h"
#include "SkCanvas.h"
#include "SkStream.h"
using namespace android;
// taken from jcolor.c in libjpeg
#if 0 // 16bit - precise but slow
#define CYR 19595 // 0.299
#define CYG 38470 // 0.587
#define CYB 7471 // 0.114
#define CUR -11059 // -0.16874
#define CUG -21709 // -0.33126
#define CUB 32768 // 0.5
#define CVR 32768 // 0.5
#define CVG -27439 // -0.41869
#define CVB -5329 // -0.08131
#define CSHIFT 16
#else // 8bit - fast, slightly less precise
#define CYR 77 // 0.299
#define CYG 150 // 0.587
#define CYB 29 // 0.114
#define CUR -43 // -0.16874
#define CUG -85 // -0.33126
#define CUB 128 // 0.5
#define CVR 128 // 0.5
#define CVG -107 // -0.41869
#define CVB -21 // -0.08131
#define CSHIFT 8
#endif
static void rgb2yuv_32(uint8_t dst[], SkPMColor c) {
    int r = SkGetPackedR32(c);
    int g = SkGetPackedG32(c);
    int b = SkGetPackedB32(c);
    int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
    int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
    int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_32_x(uint8_t *py, uint8_t *pu, uint8_t *pv, SkPMColor c) {
    int r = SkGetPackedR32(c);
    int g = SkGetPackedG32(c);
    int b = SkGetPackedB32(c);
    if (py != NULL) {
        int y = ( CYR*r + CYG*g + CYB*b ) >> CSHIFT;
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( CUR*r + CUG*g + CUB*b ) >> CSHIFT;
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( CVR*r + CVG*g + CVB*b ) >> CSHIFT;
        *pv = SkToU8(v + 128);
    }
}
static void rgb2yuv_4444(uint8_t dst[], U16CPU c) {
    int r = SkGetPackedR4444(c);
    int g = SkGetPackedG4444(c);
    int b = SkGetPackedB4444(c);
    int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
    int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
    int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_4444_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
    int r = SkGetPackedR4444(c);
    int g = SkGetPackedG4444(c);
    int b = SkGetPackedB4444(c);
    if (py != NULL) {
        int y = ( CYR*r + CYG*g + CYB*b ) >> (CSHIFT - 4);
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( CUR*r + CUG*g + CUB*b ) >> (CSHIFT - 4);
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( CVR*r + CVG*g + CVB*b ) >> (CSHIFT - 4);
        *pv = SkToU8(v + 128);
    }
}
// For RGB565 input the 5-bit R/B and 6-bit G channels are expanded to
// 8 bits by folding the scaling into the weights:
// (2*C*r5) >> (CSHIFT-2) == (C*(r5*8)) >> CSHIFT, and
// (C*g6)   >> (CSHIFT-2) == (C*(g6*4)) >> CSHIFT.
static void rgb2yuv_16(uint8_t dst[], U16CPU c) {
    int r = SkGetPackedR16(c);
    int g = SkGetPackedG16(c);
    int b = SkGetPackedB16(c);
    int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
    int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
    int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
    dst[0] = SkToU8(y);
    dst[1] = SkToU8(u + 128);
    dst[2] = SkToU8(v + 128);
}
static void rgb2yuv_16_x(uint8_t *py, uint8_t *pu, uint8_t *pv, U16CPU c) {
    int r = SkGetPackedR16(c);
    int g = SkGetPackedG16(c);
    int b = SkGetPackedB16(c);
    if (py != NULL) {
        int y = ( 2*CYR*r + CYG*g + 2*CYB*b ) >> (CSHIFT - 2);
        *py = SkToU8(y);
    }
    if (pu != NULL) {
        int u = ( 2*CUR*r + CUG*g + 2*CUB*b ) >> (CSHIFT - 2);
        *pu = SkToU8(u + 128);
    }
    if (pv != NULL) {
        int v = ( 2*CVR*r + CVG*g + 2*CVB*b ) >> (CSHIFT - 2);
        *pv = SkToU8(v + 128);
    }
}
int ConvertRGB5652YUV420SPBySkia(SkBitmap* bmp, unsigned char* dst) {
    if (!bmp || !dst || bmp->getConfig() != SkBitmap::kRGB_565_Config)
        return -1;
    int width = bmp->width();
    int height = bmp->height();
    void *src = bmp->getPixels();
    int src_rowbytes = bmp->rowBytes();
    int stride = width;
    int dstheight = height;
    int i, j;
    uint8_t *y_base = (uint8_t *)dst;
    uint8_t *cb_base = y_base + stride * dstheight; // plain pointer math,
    uint8_t *cr_base = cb_base + 1;                 // no 32-bit-only casts
    uint8_t *y = NULL, *cb = NULL, *cr = NULL;
    uint16_t *rgb = (uint16_t *)src;
    for (i = 0; i < height; i++) {
        rgb = (uint16_t *)((uint8_t *)src + i * src_rowbytes);
        y = y_base + i * stride;
        if ((i & 0x1) == 0) {
            cb = cb_base + (i >> 1) * stride;
            cr = cb + 1;
        }
        for (j = 0; j < width; j++) {
            if (i & 0x1) { // odd row: y, plus cr for even columns
                if (j & 0x1) { // only y
                    rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
                } else { // both y and cr
                    rgb2yuv_16_x(y++, NULL, cr, *rgb++);
                    cr += 2; // skip over the interleaved cb slot
                }
            } else { // even row: y, plus cb for even columns
                if (j & 0x1) { // only y
                    rgb2yuv_16_x(y++, NULL, NULL, *rgb++);
                } else { // both y and cb
                    rgb2yuv_16_x(y++, cb, NULL, *rgb++);
                    cb += 2; // skip over the interleaved cr slot
                }
            }
        }
    }
    return 0;
}
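For completeness, a hypothetical call site for the function above, assuming the old SkImageDecoder API that this generation of Skia shipped; the file path and buffer handling are illustrative only:
#include "SkImageDecoder.h"

SkBitmap bmp;
// decode straight to RGB565 so the converter's config check passes
if (SkImageDecoder::DecodeFile("/sdcard/test.jpg", &bmp,
                               SkBitmap::kRGB_565_Config,
                               SkImageDecoder::kDecodePixels_Mode)) {
    unsigned char *yuv = new unsigned char[bmp.width() * bmp.height() * 3 / 2];
    ConvertRGB5652YUV420SPBySkia(&bmp, yuv);
    // ... feed 'yuv' to the consumer ...
    delete[] yuv;
}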

Related

Unable to change ARGB to YUV420Planar

I'm attempting to use MediaCodec and MediaMuxer to change a series of JPEGs into an MP4.
No matter what I do, I always get a green, staticky screen as output in the MP4.
Code follows:
public class AvcEncoder
{
    public bool CanEncode = true;
    MediaCodec codec;
    MediaMuxer muxer;
    MediaFormat format;

    public AvcEncoder()
    {
        codec = MediaCodec.CreateEncoderByType("video/avc");
        format = MediaFormat.CreateVideoFormat("video/avc", 720, 480);
        format.SetInteger(MediaFormat.KeyBitRate, 700000);
        format.SetInteger(MediaFormat.KeyFrameRate, 10);
        format.SetInteger(MediaFormat.KeyColorFormat, (int)Android.Media.MediaCodecCapabilities.Formatyuv420planar);
        format.SetInteger(MediaFormat.KeyIFrameInterval, 5);
        codec.Configure(format, null, null, MediaCodecConfigFlags.Encode);
        codec.Start();
        Java.IO.File f = new Java.IO.File(Android.OS.Environment.ExternalStorageDirectory, "Parkingdom");
        if (!f.Exists())
        {
            f.Mkdirs();
        }
        muxer = new MediaMuxer(f.ToString() + "/test.mp4", MuxerOutputType.Mpeg4);
    }

    public void EncodeFrame(Bitmap image)
    {
        int mWidth = image.Width;
        int mHeight = image.Height;
        int[] mIntArray = new int[mWidth * mHeight];
        // Copy pixel data from the Bitmap into the 'intArray' array
        image.GetPixels(mIntArray, 0, mWidth, 0, 0, mWidth, mHeight);
        byte[] byteArray = new byte[mWidth * mHeight * 3 / 2];
        // Call to encoding function: convert intArray to YUV binary data
        EncodeYUV420P(byteArray, mIntArray, mWidth, mHeight);
        using (var stream = new MemoryStream())
        {
            // NOTE: this overwrites the YUV data produced just above with
            // PNG bytes, so the codec is actually fed PNG, not YUV420Planar
            image.Compress(Bitmap.CompressFormat.Png, 100, stream);
            byteArray = stream.ToArray();
        }
        int inputBufferIndex = codec.DequeueInputBuffer(-1);
        if (inputBufferIndex >= 0)
        {
            ByteBuffer buffer = codec.GetInputBuffer(inputBufferIndex);
            buffer.Clear();
            buffer.Put(byteArray);
            codec.QueueInputBuffer(inputBufferIndex, 0, byteArray.Length, 0, 0);
        }
    }

    public void SaveMp4()
    {
        CanEncode = false;
        bool running = true;
        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
        int track = -1;
        while (running)
        {
            int index = codec.DequeueOutputBuffer(bufferInfo, 10000);
            if (index == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                MediaFormat format = codec.OutputFormat;
                track = muxer.AddTrack(format);
                muxer.Start();
            }
            else if (index == (int)MediaCodecInfoState.TryAgainLater)
            {
                break;
            }
            else if (index >= 0)
            {
                if ((bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
                {
                    bufferInfo.Size = 0;
                }
                if (track != -1)
                {
                    ByteBuffer outBuffer = codec.GetOutputBuffer(index);
                    outBuffer.Position(bufferInfo.Offset);
                    outBuffer.Limit(bufferInfo.Offset + bufferInfo.Size);
                    muxer.WriteSampleData(track, outBuffer, bufferInfo);
                    codec.ReleaseOutputBuffer(index, false);
                }
            }
        }
        codec.Stop();
        codec.Release();
        muxer.Stop();
        muxer.Release();
        CanEncode = true;
    }

    void EncodeYUV420P(byte[] yuv420p, int[] argb, int width, int height)
    {
        int frameSize = width * height;
        int chromasize = frameSize / 4;
        int yIndex = 0;
        int uIndex = frameSize;
        int vIndex = frameSize + chromasize;
        int a, R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++)
        {
            for (int i = 0; i < width; i++)
            {
                a = (int)(argb[index] & 0xff000000) >> 24; // a is not used obviously
                R = (argb[index] & 0xff0000) >> 16;
                G = (argb[index] & 0xff00) >> 8;
                B = (argb[index] & 0xff) >> 0;
                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
                yuv420p[yIndex++] = (byte)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0)
                {
                    yuv420p[uIndex++] = (byte)((U < 0) ? 0 : ((U > 255) ? 255 : U));
                    yuv420p[vIndex++] = (byte)((V < 0) ? 0 : ((V > 255) ? 255 : V));
                }
                index++;
            }
        }
    }
}
Each time a new JPEG is generated, "EncodeFrame" is called, which is supposed to change it into YUV420Planar format for the media codec. The codec I'm testing with doesn't support semi-planar.
In case someone comes across this later: I changed EncodeFrame to use a Surface instead and just used DrawBitmap().
It's slower than the byte copy, but it works for my purposes.

How to detect average pixel intensity from live camera preview?

I am building a scanner app and trying to determine the "preview quality" from the camera's preview callback. I want to customize the camera's AUTO_FLASH_MODE so that it is turned on if the environment is too dark.
How can I detect a high average of dark pixels? That would mean the preview is dark and I therefore need to turn on the camera's flash light.
Either find out how to access the pixel values of your image and calculate the average intensity yourself, or use any image processing library to do so.
Dark pixels have low values, bright pixels have high values.
You want to calculate the average of all red, green and blue values divided by three times your pixel count.
Define a threshold for when to turn on the flash, but keep in mind that you will have to get a new exposure time then.
Prefer flash over an exposure time increase, as long exposure times yield higher image noise.
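Since Android camera previews arrive as NV21 (YUV420SP) by default, and the first width*height bytes of an NV21 frame are the luma plane, the average brightness can be computed straight from the callback buffer with no bitmap or RGB conversion at all. A minimal C++-style sketch (function name and threshold are illustrative):
#include <stdint.h>

// Mean brightness of an NV21/YUV420SP frame: averaging the Y plane
// (the first width*height bytes) is sufficient.
static int averageLuma(const uint8_t *yuv, int width, int height)
{
    long sum = 0;
    const int n = width * height;
    for (int i = 0; i < n; i++)
        sum += yuv[i];
    return (int)(sum / n); // 0 = black, 255 = white
}
Comparing this value against a tuned threshold avoids the decodeYUV420SP/createBitmap round trip shown in the code below.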
I tried this approach, but I think it takes unnecessary time to process the bitmap and then get an average screen color:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Size cameraResolution = resolution;
    PreviewCallback callback = this.callback;
    if (cameraResolution != null && callback != null) {
        int format = camera.getParameters().getPreviewFormat();
        SourceData source = new SourceData(data, cameraResolution.width, cameraResolution.height, format, getCameraRotation());
        callback.onPreview(source);
        final int[] rgb = decodeYUV420SP(data, cameraResolution.width, cameraResolution.height);
        //Bitmap bmp = decodeBitmap(source.getData());
        Bitmap bmp = Bitmap.createBitmap(rgb, cameraResolution.width, cameraResolution.height, Bitmap.Config.ARGB_8888);
        if (bmp != null) {
            //bmp = decodeBitmap(source.getData());
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            // bmp.compress(Bitmap.CompressFormat.JPEG, 70, bytes);
            Bitmap resizebitmap = Bitmap.createBitmap(bmp,
                    bmp.getWidth() / 2, bmp.getHeight() / 2, 60, 60);
            int color = getAverageColor(resizebitmap);
            Log.i("Color Int", color + "");
            // int color = resizebitmap.getPixel(resizebitmap.getWidth()/2,resizebitmap.getHeight()/2);
            String strColor = String.format("#%06X", 0xFFFFFF & color);
            //String colorname = sColorNameMap.get(strColor);
            Log.d("strColor", strColor);
            Log.i("strColor", color + "");
            if (!mIsOn) {
                if (color == -16777216 || color < -16777216) { // minimum color code (full dark)
                    mIsOn = true;
                    setTorch(true);
                    Log.d("Yahooooo", "" + color);
                }
            }
            Log.i("Pixel Value",
                    "Top Left pixel: " + Integer.toHexString(color));
        }
    } else {
        Log.d(TAG, "Got preview callback, but no handler or resolution available");
    }
}
private int[] decodeYUV420SP(byte[] yuv420sp, int width, int height)
{
    final int frameSize = width * height;
    int rgb[] = new int[width * height];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
    return rgb;
}
private int getAverageColor(Bitmap bitmap)
{
    int redBucket = 0;
    int greenBucket = 0;
    int blueBucket = 0;
    int pixelCount = 0;
    for (int y = 0; y < bitmap.getHeight(); y++) {
        for (int x = 0; x < bitmap.getWidth(); x++) {
            int c = bitmap.getPixel(x, y);
            pixelCount++;
            redBucket += Color.red(c);
            greenBucket += Color.green(c);
            blueBucket += Color.blue(c);
            // does alpha matter?
        }
    }
    int averageColor = Color.rgb(redBucket / pixelCount,
            greenBucket / pixelCount, blueBucket / pixelCount);
    return averageColor;
}

YUV to RGB and CreateBitmap...which is the format?

I'm not an expert on image formats. I'm testing the frame-rate performance of the camera.
When I convert the data from YUV to RGB, which RGB format does the resulting data have: RGB565 or ARGB8888?
And why does createBitmap take so long? Does it add info to the raw data?
This is the RGB conversion code:
public int[] YUV_NV21_TO_RGB(byte[] yuv, int width, int height) {
    final int frameSize = width * height;
    int[] argb = new int[width * height];
    final int ii = 0;
    final int ij = 0;
    final int di = +1;
    final int dj = +1;
    int a = 0;
    for (int i = 0, ci = ii; i < height; ++i, ci += di) {
        for (int j = 0, cj = ij; j < width; ++j, cj += dj) {
            int y = (0xff & ((int) yuv[ci * width + cj]));
            int v = (0xff & ((int) yuv[frameSize + (ci >> 1) * width + (cj & ~1) + 0]));
            int u = (0xff & ((int) yuv[frameSize + (ci >> 1) * width + (cj & ~1) + 1]));
            y = y < 16 ? 16 : y;
            int a0 = 1192 * (y - 16);
            int a1 = 1634 * (v - 128);
            int a2 = 832 * (v - 128);
            int a3 = 400 * (u - 128);
            int a4 = 2066 * (u - 128);
            int r = (a0 + a1) >> 10;
            int g = (a0 - a2 - a3) >> 10;
            int b = (a0 + a4) >> 10;
            r = r < 0 ? 0 : (r > 255 ? 255 : r);
            g = g < 0 ? 0 : (g > 255 ? 255 : g);
            b = b < 0 ? 0 : (b > 255 ? 255 : b);
            // packs ARGB_8888-style ints: 0xAARRGGBB
            argb[a++] = 0xff000000 | (r << 16) | (g << 8) | b;
        }
    }
    return argb;
}
The problem is that if I use createBitmap with the RGB_565 option, the time is at least 10 ms faster than with ARGB_8888.
If RGB_565 is a sort of compression (loss of data), shouldn't it be the opposite (createBitmap with ARGB_8888 faster than RGB_565)?

implement water color effect on image using JNI

I implemented some code for a watercolor effect on an image in Android, but it was too slow (it takes more than 2 minutes), so now I'm trying to implement it in JNI for better speed.
Here is my Java code; inPixels holds the pixels of the Bitmap.
protected int[] filterPixels(int width, int height, int[] inPixels)
{
    int levels = 256;
    int index = 0;
    int[] rHistogram = new int[levels];
    int[] gHistogram = new int[levels];
    int[] bHistogram = new int[levels];
    int[] rTotal = new int[levels];
    int[] gTotal = new int[levels];
    int[] bTotal = new int[levels];
    int[] outPixels = new int[width * height];
    for (int y = 0; y < height; y++)
    {
        for (int x = 0; x < width; x++)
        {
            for (int i = 0; i < levels; i++)
                rHistogram[i] = gHistogram[i] = bHistogram[i] = rTotal[i] = gTotal[i] = bTotal[i] = 0;
            for (int row = -range; row <= range; row++) // 'range' is a field of the enclosing class
            {
                int iy = y + row;
                int ioffset;
                if (0 <= iy && iy < height)
                {
                    ioffset = iy * width;
                    for (int col = -range; col <= range; col++)
                    {
                        int ix = x + col;
                        if (0 <= ix && ix < width) {
                            int rgb = inPixels[ioffset + ix];
                            int r = (rgb >> 16) & 0xff;
                            int g = (rgb >> 8) & 0xff;
                            int b = rgb & 0xff;
                            int ri = r * levels / 256;
                            int gi = g * levels / 256;
                            int bi = b * levels / 256;
                            rTotal[ri] += r;
                            gTotal[gi] += g;
                            bTotal[bi] += b;
                            rHistogram[ri]++;
                            gHistogram[gi]++;
                            bHistogram[bi]++;
                        }
                    }
                }
            }
            int r = 0, g = 0, b = 0;
            for (int i = 1; i < levels; i++)
            {
                if (rHistogram[i] > rHistogram[r])
                    r = i;
                if (gHistogram[i] > gHistogram[g])
                    g = i;
                if (bHistogram[i] > bHistogram[b])
                    b = i;
            }
            r = rTotal[r] / rHistogram[r];
            g = gTotal[g] / gHistogram[g];
            b = bTotal[b] / bHistogram[b];
            outPixels[index] = (inPixels[index] & 0xff000000) | (r << 16) | (g << 8) | b;
            index++;
        }
    }
    return outPixels;
}
(Output image omitted.)
I tried to convert this Java code to C code, but I don't know what is wrong. Here is the C code:
void filterPixels(int width, int height, int inPixels[])
{
    int levels = 256;
    int index = 0;
    int rHistogram[levels];
    int gHistogram[levels];
    int bHistogram[levels];
    int rTotal[levels];
    int gTotal[levels];
    int bTotal[levels];
    // NOTE: outPixels is a local (stack) array and is never copied back or
    // returned, so the result of this function is currently discarded; it
    // can also overflow the stack for large images
    int outPixels[width * height];
    // Loop variables
    int y;
    int x;
    int i;
    int row;
    int col;
    int j;
    int range = 5;
    for (y = 0; y < height; y++)
    {
        for (x = 0; x < width; x++)
        {
            for (i = 0; i < levels; i++)
                rHistogram[i] = gHistogram[i] = bHistogram[i] = rTotal[i] = gTotal[i] = bTotal[i] = 0;
            for (row = -range; row <= range; row++)
            {
                int iy = y + row;
                int ioffset;
                if (0 <= iy && iy < height)
                {
                    ioffset = iy * width;
                    for (col = -range; col <= range; col++)
                    {
                        int ix = x + col;
                        if (0 <= ix && ix < width) {
                            int rgb = inPixels[ioffset + ix];
                            int r = (rgb >> 16) & 0xff;
                            int g = (rgb >> 8) & 0xff;
                            int b = rgb & 0xff;
                            int ri = r * levels / 256;
                            int gi = g * levels / 256;
                            int bi = b * levels / 256;
                            rTotal[ri] += r;
                            gTotal[gi] += g;
                            bTotal[bi] += b;
                            rHistogram[ri]++;
                            gHistogram[gi]++;
                            bHistogram[bi]++;
                        }
                    }
                }
            }
            int r = 0, g = 0, b = 0;
            for (j = 1; j < levels; j++)
            {
                if (rHistogram[j] > rHistogram[r])
                    r = j;
                if (gHistogram[j] > gHistogram[g])
                    g = j;
                if (bHistogram[j] > bHistogram[b])
                    b = j;
            }
            r = rTotal[r] / rHistogram[r];
            g = gTotal[g] / gHistogram[g];
            b = bTotal[b] / bHistogram[b];
            outPixels[index] = (inPixels[index] & 0xff000000) | (r << 16) | (g << 8) | b;
            index++;
        }
    }
}
I checked the pixel values of the Java code and the C code; both are the same (for the same image).
Here is the code that calls the native function from my Android activity:
int[] pix = new int[oraginal.getWidth() * oraginal.getHeight()];
Bitmap bitmap = oraginal.copy(oraginal.getConfig(), true);
bitmap.getPixels(pix, 0, bitmap.getWidth(), 0, 0,bitmap.getWidth(), bitmap.getHeight());
filterPixelsJNI(bitmap.getWidth(), bitmap.getHeight(), pix);
bitmap.setPixels(pix, 0, bitmap.getWidth(), 0, 0,bitmap.getWidth(), bitmap.getHeight());
myView.setImageBitmap(bitmap);
This is my first try at JNI, so please help me with this.
UPDATE
public native void filterPixelsJNI(int width, int height, int inPixels[]);
JNI:
JNIEXPORT void JNICALL Java_com_testndk_HelloWorldActivity_filterPixelsJNI(JNIEnv *env, jobject obj, jint width, jint height, jint inPixels[]) {
    filterPixels(width, height, inPixels);
}
filterPixels is the method that is called from the C code.
There are several problems with your JNI code. The algorithmic part is probably correct, but you're not handling the Java-array-to-C-array conversion correctly.
First of all, the last argument of Java_com_testndk_HelloWorldActivity_filterPixelsJNI should be of type jintArray, not jint []. This is how you pass a Java array to C code.
Once you get this array, you can't process it directly; you have to convert it to a C array:
JNIEXPORT void JNICALL Java_com_testndk_HelloWorldActivity_filterPixelsJNI(JNIEnv *env, jobject obj, jint width, jint height, jintArray inPixels) {
    jint *c_inPixels = (*env)->GetIntArrayElements(env, inPixels, NULL);
    filterPixels(width, height, c_inPixels);
    // passing 0 as the last argument copies the native array back to the Java array
    (*env)->ReleaseIntArrayElements(env, inPixels, c_inPixels, 0);
}
I advise you to look at the JNI documentation, which explains how to deal with arrays: http://docs.oracle.com/javase/1.5.0/docs/guide/jni/spec/functions.html
Note that there are now easier ways of processing Java Bitmap objects using the Android NDK. See another of my answers for details.
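As a sketch of that NDK route (the AndroidBitmap_* calls are the real <android/bitmap.h> API, but the filterBitmapJNI entry point is hypothetical, and filterPixels would need to write its result back into the input array for the effect to be visible):
#include <android/bitmap.h>
#include <jni.h>

JNIEXPORT void JNICALL
Java_com_testndk_HelloWorldActivity_filterBitmapJNI(JNIEnv *env, jobject obj,
                                                    jobject bitmap)
{
    AndroidBitmapInfo info;
    void *pixels;
    if (AndroidBitmap_getInfo(env, bitmap, &info) != ANDROID_BITMAP_RESULT_SUCCESS)
        return;
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888)
        return; // this sketch only handles 32-bit bitmaps
    if (AndroidBitmap_lockPixels(env, bitmap, &pixels) != ANDROID_BITMAP_RESULT_SUCCESS)
        return;
    filterPixels(info.width, info.height, (int *)pixels); // filter in place
    AndroidBitmap_unlockPixels(env, bitmap);
}
This avoids the getPixels/setPixels round trip on the Java side entirely, since the native code works directly on the bitmap's pixel buffer.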

Android JavaCV create IplImage from Camera to use with ColorHistogram

I am using JavaCV in Android.
In my code, I have created an ImageComparator (a class from the OpenCV2 Cookbook examples:
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4) object and use that object to compare images. If I use a file from the SD card, the comparator works:
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile,CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
But when I create the IplImage from the camera preview, it does not work. I get the following exception during the comparison "score" calculation:
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale:
int f = SUBSAMPLING_FACTOR;
if (grayImage == null || grayImage.width() != width / f
        || grayImage.height() != height / f) {
    grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}
int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
    int dataLine = y * dataStride;
    int imageLine = y * imageStride;
    for (int x = 0; x < imageWidth; x++) {
        imageBuffer.put(imageLine + x, data[dataLine + f * x]);
    }
}
How can I create a color IplImage from the camera to use with ImageComparator?
The code below seems to be working fine:
public void onPreviewFrame(final byte[] data, final Camera camera) {
    try {
        Camera.Size size = camera.getParameters().getPreviewSize();
        processImage(data, size.width, size.height);
        camera.addCallbackBuffer(data);
    } catch (RuntimeException e) {
        // The camera has probably just been released, ignore.
        Log.d("Exception", " " + e);
    }
}

protected void processImage(byte[] data, int width, int height) {
    score.clear();
    // First, downsample our image
    int f = SUBSAMPLING_FACTOR;
    // note: f is used here as the channel count, so (assuming f == 4) each
    // pixel of _4image holds one 32-bit ARGB value from _temp
    IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
    int[] _temp = new int[width * height];
    if (_4image != null) {
        decodeYUV420SP(_temp, data, width, height);
        _4image.getIntBuffer().put(_temp);
    }
    //bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    //bitmap.copyPixelsFromBuffer(_4image.getByteBuffer());
    Log.d("CompareAndroid", "processImage");
    int imageSize = _4image.width() * _4image.height();
    Iterator<ImageComparator> iterator = reference_List.iterator();
    // Compute histogram match and normalize by image size.
    // 1 means perfect match.
    while (iterator.hasNext()) {
        score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
    }
    Log.d("CompareImages", "Score Size " + score.size());
    postInvalidate();
}
This code seems to be working fine.
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width,
        int height) {
    int frameSize = width * height;
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0)
                y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            // note: B is packed into the high byte here (BGR order),
            // matching OpenCV's BGR channel layout
            rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
        }
    }
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 2);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
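Expressed against the underlying OpenCV C API (rather than JavaCV), and equally untested, the same idea looks like the fragment below. Note that cvCvtColor expects the YUV source as a single-channel image with height*3/2 rows for the *_NV21 conversions; 'data' stands for the NV21 preview buffer:
#include <opencv2/core/core_c.h>
#include <opencv2/imgproc/imgproc_c.h>
#include <string.h>

IplImage *yuv = cvCreateImage(cvSize(width, height * 3 / 2), IPL_DEPTH_8U, 1);
memcpy(yuv->imageData, data, width * height * 3 / 2); // assumes no row padding
IplImage *bgr = cvCreateImage(cvSize(width, height), IPL_DEPTH_8U, 3);
cvCvtColor(yuv, bgr, CV_YUV2BGR_NV21);
// ... use 'bgr' with the comparator, then release both images
cvReleaseImage(&yuv);
cvReleaseImage(&bgr);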
