Creating a new Bitmap with pixel data in JNI (Android)?

I have the code below to create a Bitmap (just a black/gray image) in JNI with the ARGB_8888 configuration. But when I dump the contents of the Bitmap in the Java code, I can see only the configuration, not the pixel data.
JNI Code
// Image Details
int imgWidth = 128;
int imgHeight = 128;
int numPix = imgWidth * imgHeight;
// Creating Bitmap.Config class
jclass bmpCfgCls = env->FindClass("android/graphics/Bitmap$Config");
jmethodID bmpClsValueOfMid = env->GetStaticMethodID(bmpCfgCls, "valueOf", "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;");
jobject jBmpCfg = env->CallStaticObjectMethod(bmpCfgCls, bmpClsValueOfMid, env->NewStringUTF("ARGB_8888"));
// Creating a Bitmap Class
jclass bmpCls = env->FindClass("android/graphics/Bitmap");
jmethodID createBitmapMid = env->GetStaticMethodID(bmpCls, "createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
jBmpObj = env->CallStaticObjectMethod(bmpCls, createBitmapMid, imgWidth, imgHeight, jBmpCfg);
// Creating Pixel Data
int triplicateLen = numPix * 4;
char *tripPixData = (char*)malloc(triplicateLen);
for (int lc = 0; lc < triplicateLen; lc++)
{
    // Gray / Black image
    if (0 == (lc % 4))
        tripPixData[lc] = 0x7F; // Alpha
    else
        tripPixData[lc] = 0x00; // RGB
}
// Setting Pixels in Bitmap
jByteArr = env->NewByteArray(triplicateLen);
env->SetByteArrayRegion(jByteArr, 0, triplicateLen, (jbyte*)tripPixData);
jmethodID setPixelsMid = env->GetMethodID(bmpCls, "setPixels", "([IIIIIII)V");
env->CallVoidMethod(jBmpObj, setPixelsMid, (jintArray)jByteArr, 0, imgWidth, 0, 0, imgWidth, imgHeight);
free(tripPixData);
// Return BitMap Object
return jBmpObj;
In Java (Output)
// Checking the Configuration / Image Details
jBmpObj.getWidth() - 128
jBmpObj.getHeight() - 128
jBmpObj.getRowBytes() - 512
jBmpObj.getConfig() - ARGB 8888
// Getting Pixel Data
imgPixs = new int[jBmpObj.getWidth() * jBmpObj.getHeight()];
jBmpObj.getPixels(imgPixs, 0, jBmpObj.getWidth(), 0, 0, jBmpObj.getWidth(), jBmpObj.getHeight());
// Running a Loop on the imgPixs
imgPixs[0 .. imgPixs.length - 1] - 0 (every pixel)
I used the same approach to create a Bitmap in Java code, and it works fine (I can even see the image). But I want the logic in the JNI part, not in the Java code, so I tried the logic above and it failed to set the pixel data.
Any input on fixing this issue would be really helpful.

Full working example:
jclass bitmapConfig = jniEnv->FindClass("android/graphics/Bitmap$Config");
jfieldID rgba8888FieldID = jniEnv->GetStaticFieldID(bitmapConfig, "ARGB_8888", "Landroid/graphics/Bitmap$Config;");
jobject rgba8888Obj = jniEnv->GetStaticObjectField(bitmapConfig, rgba8888FieldID);
jclass bitmapClass = jniEnv->FindClass("android/graphics/Bitmap");
jmethodID createBitmapMethodID = jniEnv->GetStaticMethodID(bitmapClass,"createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
jobject bitmapObj = jniEnv->CallStaticObjectMethod(bitmapClass, createBitmapMethodID, _width, _height, rgba8888Obj);
jintArray pixels = jniEnv->NewIntArray(_width * _height);
for (int i = 0; i < _width * _height; i++)
{
    unsigned char red = bitmap[i * 4];
    unsigned char green = bitmap[i * 4 + 1];
    unsigned char blue = bitmap[i * 4 + 2];
    unsigned char alpha = bitmap[i * 4 + 3];
    int currentPixel = (alpha << 24) | (red << 16) | (green << 8) | (blue);
    jniEnv->SetIntArrayRegion(pixels, i, 1, &currentPixel);
}
jmethodID setPixelsMid = jniEnv->GetMethodID(bitmapClass, "setPixels", "([IIIIIII)V");
jniEnv->CallVoidMethod(bitmapObj, setPixelsMid, pixels, 0, _width, 0, 0, _width, _height);
where bitmap is an unsigned char* holding the pixels in RGBA byte order.
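Alternatively, you can skip setPixels and write into the Bitmap's native buffer directly with the jnigraphics NDK library (#include <android/bitmap.h>, link with -ljnigraphics). A minimal sketch, assuming the same bitmapObj and RGBA source buffer as above (ANDROID_BITMAP_FORMAT_RGBA_8888 stores bytes as R,G,B,A in memory, so no repacking is needed):
#include <android/bitmap.h>
#include <cstring>
AndroidBitmapInfo info;
void *dstPixels;
if (AndroidBitmap_getInfo(jniEnv, bitmapObj, &info) == ANDROID_BITMAP_RESULT_SUCCESS &&
    AndroidBitmap_lockPixels(jniEnv, bitmapObj, &dstPixels) == ANDROID_BITMAP_RESULT_SUCCESS)
{
    // copy row by row: info.stride (bytes per row) may be larger than width * 4
    for (uint32_t y = 0; y < info.height; y++)
    {
        memcpy((char *) dstPixels + y * info.stride, bitmap + y * _width * 4, _width * 4);
    }
    AndroidBitmap_unlockPixels(jniEnv, bitmapObj);
}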

You cannot cast a byte[] to an int[] in Java, and therefore you cannot cast it in JNI either. But you can cast char* to int*, so you can simply use your tripPixData to fill a new jintArray.
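For instance, a minimal sketch of that fix, reusing the names from the question. Note that on little-endian Android the byte at offset 0 becomes the LOW byte of each int, so packing the ARGB ints explicitly (as in the other answers) is the safer route:
jintArray jIntArr = env->NewIntArray(numPix);
// reinterpret each 4-byte group as a jint; endian-dependent, see the note above
env->SetIntArrayRegion(jIntArr, 0, numPix, (const jint *) tripPixData);
jmethodID setPixelsMid = env->GetMethodID(bmpCls, "setPixels", "([IIIIIII)V");
env->CallVoidMethod(jBmpObj, setPixelsMid, jIntArr, 0, imgWidth, 0, 0, imgWidth, imgHeight);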

In Android, each pixel is represented as 0xFFFFFFFF, i.e. ARGB; the leading 0xFF is the most significant 8 bits (alpha). From your snippet, where are you getting the source image data? I solved this issue using the following code; I hope it helps you.
// Creating pixel data
unsigned char *rawData = ...; // your raw data
// Note: for an RGB image you need each r, g and b component as 8-bit data
// (here r, g and b are separate component arrays); for a monochrome image
// you can use the raw data directly.
int triplicateLen = imgHeight * imgWidth;
int *tripPixData = (int *) malloc(triplicateLen * sizeof(int));
if (rgb) {
    for (int lc = 0; lc < triplicateLen; lc++) {
        tripPixData[lc] = (0xFF << 24) | (r[lc] << 16) | (g[lc] << 8) | b[lc];
    }
} else {
    for (int lc = 0; lc < triplicateLen; lc++) {
        tripPixData[lc] = (0xFF << 24) | (rawData[lc] << 16) | (rawData[lc] << 8) | rawData[lc];
    }
}

Related

Need to increment red pixel values in native c++

In Android, using JNI, I have C++ code to change or increment the red pixel values in bitmap data passed in from Android:
Java_com_journaldev_androidjnibasics_MainActivity_sendMyBitmap(JNIEnv *env, jobject thiz,
                                                               jobject bitmap) {
    AndroidBitmapInfo info;
    int ret;
    if ((ret = AndroidBitmap_getInfo(env, bitmap, &info)) < 0) {
        return NULL;
    }
    if (info.format != ANDROID_BITMAP_FORMAT_RGBA_8888) {
        return NULL;
    }
    //
    // read pixels of bitmap into native memory:
    //
    void *bitmapPixels;
    if ((ret = AndroidBitmap_lockPixels(env, bitmap, &bitmapPixels)) < 0) {
        return NULL;
    }
    uint32_t *src = (uint32_t *) bitmapPixels;
    uint32_t *tempPixels = new uint32_t[info.height * info.width];
    int stride = info.stride;
    int pixelsCount = info.height * info.width;
    int x, y, red, green, blue;
    for (y = 0; y < info.height; y++) {
        uint32_t *line = (uint32_t *) bitmapPixels;
        for (x = 0; x < info.width; x++) {
            blue = (int) ((line[x] & 0xFF0000) >> 16);
            green = (int) ((line[x] & 0x00FF00) >> 8);
            red = (int) (line[x] & 0x0000FF);
            // just set it to all be red for testing
            red = 255;
            green = 0;
            blue = 0;
            // why is the image totally blue??
            line[x] = ((blue << 16) & 0xFF0000) |
                      ((green << 8) & 0x00FF00) |
                      (red & 0x0000FF);
        }
        bitmapPixels = (char *) bitmapPixels + info.stride;
    }
    memcpy(tempPixels, src, sizeof(uint32_t) * pixelsCount);
    AndroidBitmap_unlockPixels(env, bitmap);
    //
    // recycle bitmap - using bitmap.recycle()
    //
    jclass bitmapCls = env->GetObjectClass(bitmap);
    jmethodID recycleFunction = env->GetMethodID(bitmapCls, "recycle", "()V");
    if (recycleFunction == 0) {
        return NULL;
    }
    env->CallVoidMethod(bitmap, recycleFunction);
    //
    // creating a new bitmap to put the pixels into it - using Bitmap.createBitmap(int width, int height, Bitmap.Config config):
    //
    jmethodID createBitmapFunction = env->GetStaticMethodID(bitmapCls, "createBitmap",
                                                            "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
    jstring configName = env->NewStringUTF("ARGB_8888");
    jclass bitmapConfigClass = env->FindClass("android/graphics/Bitmap$Config");
    jmethodID valueOfBitmapConfigFunction = env->GetStaticMethodID(bitmapConfigClass, "valueOf",
                                                                   "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;");
    jobject bitmapConfig = env->CallStaticObjectMethod(bitmapConfigClass,
                                                       valueOfBitmapConfigFunction, configName);
    jobject newBitmap = env->CallStaticObjectMethod(bitmapCls, createBitmapFunction, info.height,
                                                    info.width, bitmapConfig);
    //
    // putting the pixels into the new bitmap:
    //
    if ((ret = AndroidBitmap_lockPixels(env, newBitmap, &bitmapPixels)) < 0) {
        return NULL;
    }
    uint32_t *newBitmapPixels = (uint32_t *) bitmapPixels;
    int whereToPut = 0;
    for (int x = info.width - 1; x >= 0; --x)
        for (int y = 0; y < info.height; ++y) {
            uint32_t pixel = tempPixels[info.width * y + x];
            newBitmapPixels[whereToPut++] = pixel;
        }
    AndroidBitmap_unlockPixels(env, newBitmap);
    delete[] tempPixels;
    return newBitmap;
}
After this process, the image comes out fully transparent or white. Can anyone help me out with this? My aim is to change the value of the R (red) component in this bitmap data. Thanks in advance.
//-------------------------------------------------------------------------------------------------
// header file byte_masks.h
// two constexpr functions that will write out the shift and mask functions at compile time
// from_byte<0> will be the same as (value & 0x000000FF) >> 0;
// from_byte<1> will be the same as (value & 0x0000FF00) >> 8;
// from_byte<2> will be the same as (value & 0x00FF0000) >> 16;
// from_byte<3> will be the same as (value & 0xFF000000) >> 24;
#pragma once
#include <cstdint>
template<size_t N>
constexpr auto from_byte(std::uint32_t value)
{
    const std::uint32_t shift = 8 * N;
    const std::uint32_t mask = 0xFFu << shift; // unsigned literal keeps the shift well-defined
    std::uint32_t retval{ (value & mask) >> shift };
    return static_cast<std::uint8_t>(retval);
}
// to_byte<1> will be the same as value << 8 etc...
template<size_t N>
constexpr auto to_byte(std::uint8_t value)
{
    const std::uint32_t shift = 8 * N;
    return static_cast<std::uint32_t>(value) << shift; // widen before shifting so value << 24 cannot overflow int
}
//-------------------------------------------------------------------------------------------------
// header file color_t.h
#pragma once
#include <cstdint>
struct color_t
{
    static color_t from_argb(std::uint32_t pixel);
    static color_t from_bgra(std::uint32_t pixel);
    std::uint32_t to_argb_value();
    std::uint32_t to_bgra_value();
    std::uint8_t alpha = 0;
    std::uint8_t red = 0;
    std::uint8_t green = 0;
    std::uint8_t blue = 0;
};
//-------------------------------------------------------------------------------------------------
// source file color_t.cpp
#include "color_t.h"
#include "byte_masks.h"
// this is basically the logic you used reading the data as ARGB
// to create a color from an integer value
color_t color_t::from_argb(std::uint32_t pixel)
{
    color_t color{};
    color.alpha = from_byte<3>(pixel);
    color.red = from_byte<2>(pixel);
    color.green = from_byte<1>(pixel);
    color.blue = from_byte<0>(pixel);
    return color;
}
// But your bitmap data has a different order for alpha, red, green, blue!!!
// ANDROID_BITMAP_FORMAT_RGBA_8888
color_t color_t::from_bgra(std::uint32_t pixel)
{
    color_t color{};
    color.blue = from_byte<3>(pixel);
    color.green = from_byte<2>(pixel);
    color.red = from_byte<1>(pixel);
    color.alpha = from_byte<0>(pixel);
    return color;
}
std::uint32_t color_t::to_argb_value()
{
    return (to_byte<3>(alpha) | to_byte<2>(red) | to_byte<1>(green) | to_byte<0>(blue));
}
std::uint32_t color_t::to_bgra_value()
{
    return (to_byte<3>(blue) | to_byte<2>(green) | to_byte<1>(red) | to_byte<0>(alpha));
}
//-------------------------------------------------------------------------------------------------
// my main.cpp, but use the color_t functions from above in your code
#include <cassert>
#include "color_t.h"
#include "byte_masks.h" // needed here for the from_byte check below
int main()
{
    // two lines just to simulate a bit of your code
    std::uint32_t line[]{ 0x00000000, 0x11223344 };
    const size_t x = 1;
    // now it's easy to get the color and change it.
    // this will use the blue, green, red, alpha order matching your
    // ANDROID_BITMAP_FORMAT_RGBA_8888 format.
    auto color = color_t::from_bgra(line[x]);
    color.red = 255;
    // also note that by splitting the code into smaller functions
    // it becomes much easier to read (especially for other people)
    line[x] = color.to_bgra_value();
    assert(from_byte<1>(line[x]) == 255); // to_bgra_value placed red in byte 1
}
I see several mistakes in your code:
You read the bitmap as BGRA_8888 into tempPixels and then use tempPixels as the source for a new ARGB_8888 buffer. You should make sure both buffers have the same format OR flip the pixel component order.
The incoming bitmap has a stride (the length of a row in bytes), which may be larger than 4 * width. This means you should allocate info.height * info.stride bytes and advance each row pointer by the stride.
As I said, the input bytes are not in ARGB order, and you completely ignore the A component; that is what makes the output fully transparent. I suggest disassembling each pixel into a struct { uint8_t r, g, b, a; } (the in-memory byte order of ANDROID_BITMAP_FORMAT_RGBA_8888) and manipulating individual components instead, as sketched below.
Finally, is there a good reason you cannot manipulate the incoming Bitmap in place instead of creating a new one and making two copies?
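A minimal sketch of that in-place approach under these assumptions (RGBA_8888 stores bytes as R,G,B,A, so in a little-endian uint32_t read R is the low byte and A the high byte; info.stride is in bytes), forcing red while preserving alpha:
#include <android/bitmap.h>
AndroidBitmapInfo info;
void *pixels;
if (AndroidBitmap_getInfo(env, bitmap, &info) == ANDROID_BITMAP_RESULT_SUCCESS &&
    info.format == ANDROID_BITMAP_FORMAT_RGBA_8888 &&
    AndroidBitmap_lockPixels(env, bitmap, &pixels) == ANDROID_BITMAP_RESULT_SUCCESS) {
    for (uint32_t y = 0; y < info.height; y++) {
        // advance by stride in bytes; a row may be padded beyond width * 4
        uint32_t *line = (uint32_t *) ((char *) pixels + y * info.stride);
        for (uint32_t x = 0; x < info.width; x++) {
            uint32_t p = line[x];
            uint32_t a = (p >> 24) & 0xFF; // keep alpha; dropping it made the output transparent
            uint32_t b = (p >> 16) & 0xFF;
            uint32_t g = (p >> 8) & 0xFF;
            uint32_t r = 0xFF;             // force full red for testing
            line[x] = (a << 24) | (b << 16) | (g << 8) | r;
        }
    }
    AndroidBitmap_unlockPixels(env, bitmap);
}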

Custom byteArray data to WebRTC videoTrack

I need to use WebRTC for Android to send specific cropped (face) video to the video channel. I was able to manipulate the Camera1Session class of WebRTC to get the face cropped. Right now I am setting it on an ImageView.
listenForBytebufferFrames() of Camera1Session.java
private void listenForBytebufferFrames() {
    this.camera.setPreviewCallbackWithBuffer(new PreviewCallback() {
        public void onPreviewFrame(byte[] data, Camera callbackCamera) {
            Camera1Session.this.checkIsOnCameraThread();
            if (callbackCamera != Camera1Session.this.camera) {
                Logging.e("Camera1Session", "Callback from a different camera. This should never happen.");
            } else if (Camera1Session.this.state != Camera1Session.SessionState.RUNNING) {
                Logging.d("Camera1Session", "Bytebuffer frame captured but camera is no longer running.");
            } else {
                mFrameProcessor.setNextFrame(data, callbackCamera);
                long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
                if (!Camera1Session.this.firstFrameReported) {
                    int startTimeMs = (int) TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - Camera1Session.this.constructionTimeNs);
                    Camera1Session.camera1StartTimeMsHistogram.addSample(startTimeMs);
                    Camera1Session.this.firstFrameReported = true;
                }
                ByteBuffer byteBuffer1 = ByteBuffer.wrap(data);
                Frame outputFrame = new Frame.Builder()
                        .setImageData(byteBuffer1,
                                Camera1Session.this.captureFormat.width,
                                Camera1Session.this.captureFormat.height,
                                ImageFormat.NV21)
                        .setTimestampMillis(mFrameProcessor.mPendingTimeMillis)
                        .setId(mFrameProcessor.mPendingFrameId)
                        .setRotation(3)
                        .build();
                int w = outputFrame.getMetadata().getWidth();
                int h = outputFrame.getMetadata().getHeight();
                SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
                if (detectedFaces.size() > 0) {
                    Face face = detectedFaces.valueAt(0);
                    ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
                    byte[] byteBuffer = byteBufferRaw.array();
                    YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);
                    ByteArrayOutputStream baos = new ByteArrayOutputStream();
                    // My crop logic to get face co-ordinates
                    yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
                    final byte[] jpegArray = baos.toByteArray();
                    Bitmap bitmap = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
                    Activity currentActivity = getActivity();
                    if (currentActivity instanceof CallActivity) {
                        ((CallActivity) currentActivity).setBitmapToImageView(bitmap); // face on ImageView is set just fine
                    }
                    Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
                    Camera1Session.this.camera.addCallbackBuffer(data);
                } else {
                    Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
                    Camera1Session.this.camera.addCallbackBuffer(data);
                }
            }
        }
    });
}
jpegArray is the final byteArray that I need to stream via WebRTC, which I tried with something like this:
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, jpegArray, (int) face.getWidth(), (int) face.getHeight(), Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(jpegArray);
Setting them up like this gives me the following error:
../../webrtc/sdk/android/src/jni/androidvideotracksource.cc line 82
Check failed: length >= width * height + 2 * uv_width * ((height + 1) / 2) (2630 vs. 460800)
Which I assume is because androidvideotracksource does not get the same length of byteArray that it expects, since the frame is cropped now.
Could someone point me in the direction of how to achieve it? Is this the correct way/place to manipulate the data and feed into the videoTrack?
Edit: a bitmap of the byte[] data does not give me a camera preview on the ImageView, unlike the byte[] jpegArray. Maybe they are packed differently?
Could you use WebRTC's DataChannel to exchange the custom data (the cropped face image in your case) and do the respective processing at the receiving end with a third-party library, e.g. OpenGL? The reason I suggest this is that the WebRTC video feed received from the channel is a real-time stream, not a byte array, and by its inherent architecture WebRTC video isn't meant for cropping. If you want to crop or augment the video, you have to use an AR library for that job. You can always leverage WebRTC's DataChannel to exchange custom data; using the video channel for this is not recommended, because it is a real-time stream, not a byte array.
WebRTC in particular, and video streaming in general, presumes that the video has fixed dimensions. If you want to crop the detected face, your options are either to pad the cropped image with e.g. black pixels (WebRTC does not use transparency) and crop the video on the receiver side, or, if you don't have control over the receiver, to resize the cropped region to fill the expected width * height frame (you should also keep the expected aspect ratio).
Note that the JPEG compress/decompress you use to crop the original is far from efficient; some other options can be found in Image crop and resize in Android.
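For illustration, a minimal sketch of the padding option for an I420 frame; the function and buffer names are hypothetical, and it assumes tight strides and even crop sizes (black in video-range YUV is Y=16, U=V=128):
#include <stdint.h>
#include <string.h>
void padI420(const uint8_t *srcY, const uint8_t *srcU, const uint8_t *srcV,
             int cropW, int cropH,   // cropped face size
             uint8_t *dstY, uint8_t *dstU, uint8_t *dstV,
             int dstW, int dstH) {   // fixed stream size
    // fill the destination frame with black first
    memset(dstY, 16, dstW * dstH);
    memset(dstU, 128, (dstW / 2) * (dstH / 2));
    memset(dstV, 128, (dstW / 2) * (dstH / 2));
    // center the crop (offsets forced even so the chroma planes stay aligned),
    // then copy it row by row into each plane
    int offX = (dstW - cropW) / 2 & ~1;
    int offY = (dstH - cropH) / 2 & ~1;
    for (int y = 0; y < cropH; y++)
        memcpy(dstY + (offY + y) * dstW + offX, srcY + y * cropW, cropW);
    for (int y = 0; y < cropH / 2; y++) {
        memcpy(dstU + (offY / 2 + y) * (dstW / 2) + offX / 2, srcU + y * (cropW / 2), cropW / 2);
        memcpy(dstV + (offY / 2 + y) * (dstW / 2) + offX / 2, srcV + y * (cropW / 2), cropW / 2);
    }
}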
Okay, this was definitely a problem of how the original byte[] data was packed versus how byte[] jpegArray was packed. Changing the packing and scaling it as AlexCohn suggested worked for me. I found help in another Stack Overflow post on how to pack it. This is the code for it:
private byte[] getNV21(int left, int top, int inputWidth, int inputHeight, Bitmap scaled) {
    int[] argb = new int[inputWidth * inputHeight];
    scaled.getPixels(argb, 0, inputWidth, left, top, inputWidth, inputHeight);
    byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
    encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
    scaled.recycle();
    return yuv;
}
private void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
    final int frameSize = width * height;
    int yIndex = 0;
    int uvIndex = frameSize;
    int a, R, G, B, Y, U, V;
    int index = 0;
    for (int j = 0; j < height; j++) {
        for (int i = 0; i < width; i++) {
            a = (argb[index] & 0xff000000) >> 24; // a is not used, obviously
            R = (argb[index] & 0xff0000) >> 16;
            G = (argb[index] & 0xff00) >> 8;
            B = (argb[index] & 0xff) >> 0;
            // well-known RGB to YUV algorithm
            Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
            U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
            V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
            // NV21 has a plane of Y and interleaved planes of VU, each subsampled
            // by a factor of 2: for every 4 Y pixels there are 1 V and 1 U.
            // Note the sampling is every other pixel AND every other scanline.
            yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
            if (j % 2 == 0 && index % 2 == 0) {
                yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
            }
            index++;
        }
    }
}
I pass this byte[] data to onByteBufferFrameCaptured and the callback:
Camera1Session.this.events.onByteBufferFrameCaptured(
        Camera1Session.this,
        data,
        w,
        h,
        Camera1Session.this.getFrameOrientation(),
        captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
Prior to this, I had to scale the bitmap, which is pretty straightforward:
int width = bitmapToScale.getWidth();
int height = bitmapToScale.getHeight();
Matrix matrix = new Matrix();
// cast to float: plain int division would truncate the scale factors
matrix.postScale((float) newWidth / width, (float) newHeight / height);
Bitmap scaledBitmap = Bitmap.createBitmap(bitmapToScale, 0, 0, width, height, matrix, true);

Android byte array to Bitmap How to

How can I convert a byte array received over a socket?
The C++ client sends image data of type uchar.
On the Android side I receive this uchar array as a byte[], whose values range from -128 to +127.
What I want to do is receive this data and display it. I tried converting it to a Bitmap using BitmapFactory.decodeByteArray(), but no luck; I get a null Bitmap. Am I doing this right, or is there a better method available?
Thanks in advance.
From the comments on the answers above, it seems like you want to create a Bitmap object from a stream of raw RGB values, not from an image format like PNG or JPEG.
This probably means that you already know the image size. In that case, you could do something like this:
byte[] rgbData = ...; // from your server
int nrOfPixels = rgbData.length / 3; // three bytes per pixel
int[] pixels = new int[nrOfPixels];
for (int i = 0; i < nrOfPixels; i++) {
    int r = rgbData[3 * i] & 0xFF;     // mask with 0xFF: Java bytes are signed
    int g = rgbData[3 * i + 1] & 0xFF;
    int b = rgbData[3 * i + 2] & 0xFF;
    pixels[i] = Color.rgb(r, g, b);
}
Bitmap bitmap = Bitmap.createBitmap(pixels, width, height, Bitmap.Config.ARGB_8888);
I've been using the code below in one of my projects and so far it's been pretty solid. I'm not sure how picky it is when the data isn't PNG-compressed, though.
byte[] bytesImage;
Bitmap bmpOld; // Contains original Bitmap
Bitmap bmpNew;
ByteArrayOutputStream baoStream = new ByteArrayOutputStream();
bmpOld.compress(Bitmap.CompressFormat.PNG, 100, baoStream);
bytesImage = baoStream.toByteArray();
bmpNew = BitmapFactory.decodeByteArray(bytesImage, 0, bytesImage.length);
edit: I've adapted the code from this post to use RGB, so the code below should work for you. I haven't had a chance to test it yet so it may need some adjusting.
byte[] bytesImage = {0, 1, 2, 0, 1, 2, 0, 1, 2, 0, 1, 2};
int intByteCount = bytesImage.length;
int[] intColors = new int[intByteCount / 3];
int intWidth = 2;
int intHeight = 2;
final int intAlpha = 255;
if ((intByteCount / 3) != (intWidth * intHeight)) {
    throw new ArrayStoreException();
}
for (int intIndex = 0; intIndex < intByteCount - 2; intIndex += 3) {
    // mask each byte with 0xFF so negative byte values don't smear the upper bits
    intColors[intIndex / 3] = (intAlpha << 24) | ((bytesImage[intIndex] & 0xFF) << 16)
            | ((bytesImage[intIndex + 1] & 0xFF) << 8) | (bytesImage[intIndex + 2] & 0xFF);
}
Bitmap bmpImage = Bitmap.createBitmap(intColors, intWidth, intHeight, Bitmap.Config.ARGB_8888);
InputStream is = new java.net.URL(urldisplay).openStream();
byte[] colors = IOUtils.toByteArray(is);
int nrOfPixels = colors.length / 3; // three bytes per pixel
int[] pixels = new int[nrOfPixels];
for (int i = 0; i < nrOfPixels; i++) {
    int r = 0xFF & colors[3 * i];
    int g = 0xFF & colors[3 * i + 1];
    int b = 0xFF & colors[3 * i + 2];
    pixels[i] = Color.rgb(r, g, b);
}
imageBitmap = Bitmap.createBitmap(pixels, width, height, Bitmap.Config.ARGB_4444);
bmImage.setImageBitmap(imageBitmap);

Generated Bitmap has only zero values

I'm trying to decode a byte array into a bitmap to be used in Android.
The byte array that I use for decoding is generated by the OpenGL command glReadPixels, and the data inside is correct.
MainActivity.dataInputStream.readFully(Image, 0, 256 * 256 * 4);
// converting from rgba to argb
for (int i = 0; i < Image.length - 1; i = i + 4) {
    aux = (Image[i + 3] & 0xFF);
    IntImage[i + 3] = (int) (Image[i + 2] & 0xFF);
    IntImage[i + 2] = (int) (Image[i + 1] & 0xFF);
    IntImage[i + 1] = (int) (Image[i] & 0xFF);
    IntImage[i] = aux;
}
If I do this:
bmp = Bitmap.createBitmap(IntImage, 256, 256, Config.ARGB_8888);
or this:
bmp = Bitmap.createBitmap(256, 256, Config.ARGB_8888);
bmp.setPixels(IntImage, 0, 256, 0, 0, 256, 256);
the resulting bitmap has only 0 values.
Can somebody please tell me why that is?
Bitmap.createBitmap expects an array of size 256 * 256, but you are giving it one of size 256 * 256 * 4, where each int holds only a single component, like 0x000000AA, 0x000000RR, 0x000000GG, ..., if you see what I mean.
Change your ARGB conversion to something like this:
int[] imageARGB = new int[256 * 256];
for (int i = 0; i < imageARGB.length; i++) {
    // pack all four components into one int per pixel
    imageARGB[i] = Color.argb(yourAlpha, RR, GG, BB);
}
Hope this helps; enjoy your work.

How to change orientation of camera preview callback buffer?

This is a variation on a question often asked hereabouts but I don't see this exact situation, so I'll throw it out there.
I have an onPreviewFrame callback set up. This gets a byte[] with NV21 data in it. We H.264-encode it and send it out as a video stream. On the other side, we see the video skewed, either 90 or 270 degrees, depending on the phone.
So the question is, how do I rotate the data, not just the preview image? Camera.Parameters.setRotation only affects taking the picture, not video. Camera.setDisplayOrientation specifically says it only affects the displayed preview, not the frame bytes:
This does not affect the order of byte array passed in onPreviewFrame(byte[], Camera), JPEG pictures, or recorded videos.
So is there a way, at any API level, to change the orientation of the byte array? Failing that, can you even rotate the NV21 (YVU) format that this comes in, or do I need to convert it to RGB first?
Turns out you do need to rotate each frame yourself before sending it off. We ended up using libyuv, which has a very convenient function that both rotates and converts it: libyuv::ConvertToI420 (https://code.google.com/p/libyuv/).
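For reference, a minimal sketch of that call for an NV21 preview frame rotated by 90 degrees; the wrapper and buffer names are hypothetical, and note that the output dimensions swap after rotation:
#include "libyuv.h"
void rotateNv21ToI420(const uint8_t *nv21, int width, int height,
                      uint8_t *dstY, uint8_t *dstU, uint8_t *dstV) {
    int dw = height; // destination width after the 90-degree rotation
    libyuv::ConvertToI420(nv21, width * height * 3 / 2, // NV21 sample and its size in bytes
                          dstY, dw,                     // Y plane and stride
                          dstU, dw / 2,                 // U plane and stride
                          dstV, dw / 2,                 // V plane and stride
                          0, 0,                         // no crop offset
                          width, height,                // source size
                          width, height,                // crop size = full frame
                          libyuv::kRotate90,
                          libyuv::FOURCC_NV21);
}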
I think you would need to rotate the picture yourself. I did it once using the NDK and the Leptonica library; a look at my code should get you started. Performance was okay-ish on a Samsung Galaxy S2 (I think I got around 15 frames or so). Since I was pushing the result into an OpenGL texture, I also had to swizzle the color bytes around.
You could speed it up by rotating the image directly in the loop that decodes the YUV data.
mPix32 and mPix8 were previously allocated to hold the converted data; you would need to replace them with your own image data structure, of course.
jint Java_de_renard_ImageFilter_nativeProcessImage(JNIEnv *env, jobject javathis, jbyteArray frame) {
    ....
    jbyte *data_buffer = env->GetByteArrayElements(frame, NULL);
    l_uint8 *byte_buffer = (l_uint8 *) data_buffer;
    yuvToPixFast(byte_buffer, mPix32, mPix8);
    env->ReleaseByteArrayElements(frame, data_buffer, JNI_ABORT);
    ....
}
static inline void yuvToPixFast(unsigned char* pY, Pix* pix32, Pix* pix8) {
    int i, j;
    int nR, nG, nB;
    int nY, nU, nV;
    l_uint32* data = pixGetData(pix32);
    l_uint32* data8 = pixGetData(pix8);
    l_int32 height = pixGetHeight(pix32);
    l_int32 width = pixGetWidth(pix32);
    l_int32 wpl = pixGetWpl(pix32);
    l_int32 wpl8 = pixGetWpl(pix8);
    l_uint8 **lineptrs = pixSetupByteProcessing(pix8, NULL, NULL);
    l_uint8* line8;
    //memcpy(data8, pY, height * width);
    unsigned char* pUV = pY + width * height;
    for (i = 0; i < height; i++) {
        nU = 0;
        nV = 0;
        unsigned char* uvp = pUV + (i >> 1) * width;
        line8 = lineptrs[i];
        memcpy(line8, pY, wpl8 * 4);
        for (j = 0; j < width; j++) {
            if ((j & 1) == 0) {
                nV = (0xff & *uvp++) - 128;
                nU = (0xff & *uvp++) - 128;
            }
            // YUV conversion
            nY = *(pY++);
            //*line8++ = (l_uint8) nY;
            nY -= 16;
            if (nY < 0) {
                nY = 0;
            }
            int y1192 = nY * 1192;
            /* double saturation to increase cartoon effect */
            //nU <<= 1;
            //nV <<= 1;
            nB = y1192 + 2066 * nU;
            nG = y1192 - 833 * nV - 400 * nU;
            nR = y1192 + 1634 * nV;
            if (nR < 0) {
                nR = 0;
            } else if (nR > 262143) {
                nR = 262143;
            }
            if (nG < 0) {
                nG = 0;
            } else if (nG > 262143) {
                nG = 262143;
            }
            if (nB < 0) {
                nB = 0;
            } else if (nB > 262143) {
                nB = 262143;
            }
            //RGBA
            //ABGR
            *data++ = ((nR << 14) & 0xff000000) | ((nG << 6) & 0xff0000) | ((nB >> 2) & 0xff00) | (0xff);
            //*data++ = (0x00 << 24) | (0xff << 16) | (0x00 << 8) | (0xff);
            //*data++ = (0xff << 24) | ((nB << 6) & 0xff0000) | ((nG >> 2) & 0xff00) | ((nR >> 10) & 0xff);
        }
    }
    pixCleanupByteProcessing(pix8, lineptrs);
}
