How to change orientation of camera preview callback buffer? - android

This is a variation on a question often asked hereabouts but I don't see this exact situation, so I'll throw it out there.
I have an onPreviewFrame callback set up. This gets a byte[] with NV21 data in it. We H.264-encode it and send it out as a video stream. On the receiving side, we see the video rotated, either 90 or 270 degrees, depending on the phone.
So the question is, how do I rotate the data, not just the preview image? Camera.Parameters.setRotation only affects taking the picture, not video. Camera.setDisplayOrientation specifically says it only affects the displayed preview, not the frame bytes:
This does not affect the order of byte array passed in onPreviewFrame(byte[], Camera), JPEG pictures, or recorded videos.
So is there a way, at any API level, to change the orientation of the byte array? Failing that, can you even rotate the NV21 (YVU) format that it comes in, or do I need to convert it to RGB first?

Turns out you do need to rotate each frame yourself before sending it off. We ended up using libyuv, which has a very convenient function that both rotates and converts it - libyuv::ConvertToI420
https://code.google.com/p/libyuv/
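If you prefer to stay in Java (or can't pull in libyuv), you can also rotate the NV21 buffer directly. This is a rough sketch of that alternative, not the libyuv path mentioned above; it assumes the usual NV21 layout (a full-size Y plane followed by an interleaved, 2x2-subsampled VU plane):
// Sketch: rotate an NV21 frame 90 degrees clockwise in plain Java.
// The returned buffer has swapped dimensions (height x width).
public static byte[] rotateNV21Cw90(byte[] input, int width, int height) {
    byte[] output = new byte[input.length];
    int frameSize = width * height;
    // Rotate the Y plane.
    int i = 0;
    for (int x = 0; x < width; x++) {
        for (int y = height - 1; y >= 0; y--) {
            output[i++] = input[y * width + x];
        }
    }
    // Rotate the interleaved VU plane (one VU pair covers a 2x2 block of Y pixels).
    i = frameSize;
    for (int x = 0; x < width; x += 2) {
        for (int y = height / 2 - 1; y >= 0; y--) {
            output[i++] = input[frameSize + y * width + x];     // V
            output[i++] = input[frameSize + y * width + x + 1]; // U
        }
    }
    return output;
}
Doing this per frame on the CPU is not free, so for higher resolutions the libyuv route above is usually the better choice.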

I think that you would need to rotate the picture yourself. I did it once using the NDK and the leptonica library. A look at my code should get you started. Performance was okayish on a Samsung Galaxy S2 (I think I got around 15 frames or so). Since I was pushing the result into an OpenGL texture I also had to swizzle the color bytes around.
You could speed it up by rotating the image directly in the loop which decodes the YUV data.
mPix32 and mPix8 were previously allocated to hold the converted data. You would need to replace them with your own image data structure, of course.
jint Java_de_renard_ImageFilter_nativeProcessImage(JNIEnv *env, jobject javathis, jbyteArray frame) {
....
jbyte *data_buffer = env->GetByteArrayElements(frame, NULL);
l_uint8 *byte_buffer = (l_uint8 *) data_buffer;
yuvToPixFast(byte_buffer, mPix32, mPix8);
env->ReleaseByteArrayElements(frame, data_buffer, JNI_ABORT);
....
}
static inline void yuvToPixFast(unsigned char* pY, Pix* pix32, Pix* pix8) {
int i, j;
int nR, nG, nB;
int nY, nU, nV;
l_uint32* data = pixGetData(pix32);
l_uint32* data8 = pixGetData(pix8);
l_int32 height = pixGetHeight(pix32);
l_int32 width = pixGetWidth(pix32);
l_int32 wpl = pixGetWpl(pix32);
l_int32 wpl8 = pixGetWpl(pix8);
l_uint8 **lineptrs = pixSetupByteProcessing(pix8, NULL, NULL);
l_uint8* line8;
//memcpy(data8,pY,height*width);
unsigned char* pUV = pY + width * height;
for (i = 0; i < height; i++) {
nU = 0;
nV = 0;
unsigned char* uvp = pUV + (i >> 1) * width;
line8 = lineptrs[i];
memcpy(line8, pY, wpl8 * 4);
for (j = 0; j < width; j++) {
if ((j & 1) == 0) {
nV = (0xff & *uvp++) - 128;
nU = (0xff & *uvp++) - 128;
}
// Yuv Convert
nY = *(pY++);
//*line8++ = (l_uint8) nY;
nY -= 16;
if (nY < 0) {
nY = 0;
}
int y1192 = nY * 1192;
/*double saturation to increase cartoon effect*/
//nU<<=1;
//nV<<=1;
nB = y1192 + 2066 * nU;
nG = y1192 - 833 * nV - 400 * nU;
nR = y1192 + 1634 * nV;
if (nR < 0) {
nR = 0;
} else if (nR > 262143) {
nR = 262143;
}
if (nG < 0) {
nG = 0;
} else if (nG > 262143) {
nG = 262143;
}
if (nB < 0) {
nB = 0;
} else if (nB > 262143) {
nB = 262143;
}
//RGBA
//ABGR
*data++ = ((nR << 14) & 0xff000000) | ((nG << 6) & 0xff0000) | ((nB >> 2) & 0xff00) | (0xff);
//*data++ = (0x00 << 24) | (0xff<<16) | (0x00<<8) | ( 0xff) ;
//*data++ = (0xff << 24) | ((nB << 6) & 0xff0000) | ((nG >> 2) & 0xff00) | ((nR >> 10) & 0xff);
}
}
pixCleanupByteProcessing(pix8, lineptrs);
}

Related

Custom byteArray data to WebRTC videoTrack

I need to use WebRTC for Android to send a specific cropped (face) video to the videoChannel. I was able to manipulate the Camera1Session class of WebRTC to get the face cropped. Right now I am setting it to an ImageView.
listenForBytebufferFrames() of Camera1Session.java
private void listenForBytebufferFrames() {
this.camera.setPreviewCallbackWithBuffer(new PreviewCallback() {
public void onPreviewFrame(byte[] data, Camera callbackCamera) {
Camera1Session.this.checkIsOnCameraThread();
if(callbackCamera != Camera1Session.this.camera) {
Logging.e("Camera1Session", "Callback from a different camera. This should never happen.");
} else if(Camera1Session.this.state != Camera1Session.SessionState.RUNNING) {
Logging.d("Camera1Session", "Bytebuffer frame captured but camera is no longer running.");
} else {
mFrameProcessor.setNextFrame(data, callbackCamera);
long captureTimeNs = TimeUnit.MILLISECONDS.toNanos(SystemClock.elapsedRealtime());
if(!Camera1Session.this.firstFrameReported) {
int startTimeMs = (int)TimeUnit.NANOSECONDS.toMillis(System.nanoTime() - Camera1Session.this.constructionTimeNs);
Camera1Session.camera1StartTimeMsHistogram.addSample(startTimeMs);
Camera1Session.this.firstFrameReported = true;
}
ByteBuffer byteBuffer1 = ByteBuffer.wrap(data);
Frame outputFrame = new Frame.Builder()
.setImageData(byteBuffer1,
Camera1Session.this.captureFormat.width,
Camera1Session.this.captureFormat.height,
ImageFormat.NV21)
.setTimestampMillis(mFrameProcessor.mPendingTimeMillis)
.setId(mFrameProcessor.mPendingFrameId)
.setRotation(3)
.build();
int w = outputFrame.getMetadata().getWidth();
int h = outputFrame.getMetadata().getHeight();
SparseArray<Face> detectedFaces = mDetector.detect(outputFrame);
if (detectedFaces.size() > 0) {
Face face = detectedFaces.valueAt(0);
ByteBuffer byteBufferRaw = outputFrame.getGrayscaleImageData();
byte[] byteBuffer = byteBufferRaw.array();
YuvImage yuvimage = new YuvImage(byteBuffer, ImageFormat.NV21, w, h, null);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
//My crop logic to get face co-ordinates
yuvimage.compressToJpeg(new Rect(left, top, right, bottom), 80, baos);
final byte[] jpegArray = baos.toByteArray();
Bitmap bitmap = BitmapFactory.decodeByteArray(jpegArray, 0, jpegArray.length);
Activity currentActivity = getActivity();
if (currentActivity instanceof CallActivity) {
((CallActivity) currentActivity).setBitmapToImageView(bitmap); //face on ImageView is set just fine
}
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
} else {
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, data, Camera1Session.this.captureFormat.width, Camera1Session.this.captureFormat.height, Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
}
}
}
});
}
jpegArray is the final byteArray that I need to stream via WebRTC, which I tried with something like this:
Camera1Session.this.events.onByteBufferFrameCaptured(Camera1Session.this, jpegArray, (int) face.getWidth(), (int) face.getHeight(), Camera1Session.this.getFrameOrientation(), captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(jpegArray);
Setting them up like this gives me the following error:
../../webrtc/sdk/android/src/jni/androidvideotracksource.cc line 82
Check failed: length >= width * height + 2 * uv_width * ((height + 1) / 2) (2630 vs. 460800)
Which I assume is because androidvideotracksource does not get the same length of byteArray that it expects, since the frame is cropped now.
Could someone point me in the direction of how to achieve it? Is this the correct way/place to manipulate the data and feed into the videoTrack?
Edit: a bitmap made from the byteArray data does not give me a camera preview on the ImageView, unlike the byteArray jpegArray. Maybe because they are packed differently?
Can we use WebRTC's DataChannel to exchange custom data, i.e. the cropped face "image" in your case, and do the respective calculation at the receiving end using a third-party library, e.g. OpenGL? The reason I am suggesting this is that the WebRTC video feed received from the channel is a real-time stream, not a byte array. By its inherent architecture, WebRTC video isn't meant to crop video. If we want to crop or augment video we have to use an AR library to do that job.
We can always leverage WebRTC's data channel to exchange customized data. Using the video channel for this is not recommended because it is a real-time stream, not a byte array. Please revert in case of any concern.
WebRTC in particular and video streaming in general presume that the video has fixed dimensions. If you want to crop the detected face, your options are either to pad the cropped image with e.g. black pixels (WebRTC does not use transparency) and crop the video on the receiver side, or, if you don't have control over the receiver, to resize the cropped region to fill the expected width * height frame (you should also keep the expected aspect ratio).
Note that the JPEG compress/decompress that you use to crop the original is far from efficient. Some other options can be found in Image crop and resize in Android.
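For the first option, here is a rough sketch of the padding idea (my own illustration, not code from either answer). It assumes the crop rectangle has even coordinates and dimensions and lies inside the frame, and that "black" in NV21 means Y = 16 with neutral V = U = 128:
private static byte[] padCroppedNV21(byte[] src, int width, int height,
                                     int left, int top, int cropWidth, int cropHeight) {
    // Full-size NV21 frame filled with black, with the cropped region copied back in place,
    // so the encoder still receives width * height * 3 / 2 bytes.
    byte[] dst = new byte[width * height * 3 / 2];
    java.util.Arrays.fill(dst, 0, width * height, (byte) 16);           // black luma
    java.util.Arrays.fill(dst, width * height, dst.length, (byte) 128); // neutral chroma
    // Copy the Y rows of the cropped region.
    for (int row = 0; row < cropHeight; row++) {
        int offset = (top + row) * width + left;
        System.arraycopy(src, offset, dst, offset, cropWidth);
    }
    // Copy the interleaved VU rows (one chroma row per two luma rows).
    int chromaBase = width * height;
    for (int row = 0; row < cropHeight / 2; row++) {
        int offset = chromaBase + (top / 2 + row) * width + left;
        System.arraycopy(src, offset, dst, offset, cropWidth);
    }
    return dst;
}
The receiver would then crop the black border back out, as described above.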
Okay, this was definitely a problem of how the original byte[] data was packed versus how byte[] jpegArray was packed. Changing the packing and scaling it as AlexCohn suggested worked for me. I found help from another post on Stack Overflow on how to pack it. This is the code for it:
private byte[] getNV21(int left, int top, int inputWidth, int inputHeight, Bitmap scaled) {
int [] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, left, top, inputWidth, inputHeight);
byte [] yuv = new byte[inputWidth*inputHeight*3/2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
scaled.recycle();
return yuv;
}
private void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
I pass this byte[] data to onByteBufferFrameCaptured and callback:
Camera1Session.this.events.onByteBufferFrameCaptured(
Camera1Session.this,
data,
w,
h,
Camera1Session.this.getFrameOrientation(),
captureTimeNs);
Camera1Session.this.camera.addCallbackBuffer(data);
Prior to this, I had to scale the bitmap, which is pretty straightforward:
int width = bitmapToScale.getWidth();
int height = bitmapToScale.getHeight();
Matrix matrix = new Matrix();
matrix.postScale((float) newWidth / width, (float) newHeight / height);
Bitmap scaledBitmap = Bitmap.createBitmap(bitmapToScale, 0, 0, bitmapToScale.getWidth(), bitmapToScale.getHeight(), matrix, true);

NV21 to Bitmap on Android, Very dark image, grayscale, or yellow tint?

I have been looking at converting the NV21 byte[] that I get from onPreviewFrame(). I have searched the forums and google for various solutions. I have tried RenderScripts and some other code examples. Some of them give me an image with a yellow tint, some give me an image with red and blue flipped (after I flip it back in the code, I get yellow tint back), some give me strange color features all throughout the image (almost like a negative), some give me a grayscale image, some give me an image so dark you can't really make anything out.
Since I am the one typing the question, I realize I must be the idiot in the room so we will start with this post. This particular solution gives me a very dark image, but I am not cool enough to be able to comment yet. Has anyone tried this solution or has one that produces an image with the same quality as the original NV21 format?
I need either a valid ARGB byte[] or a valid Bitmap; I can modify my project to deal with either. Just for reference, I have tried these (and a few others that are really just carbon copies of these):
One solution I tried
Another solution I tried
If you are trying to convert YUV from camera to Bitmap, here is something you can try:
// import android.renderscript.*
// RenderScript mRS;
// ScriptIntrinsicYuvToRGB mYuvToRGB;
// Allocation yuvPreviewAlloc;
// Allocation rgbOutputAlloc;
// Create RenderScript context, ScriptIntrinsicYuvToRGB and Allocations and keep reusing them.
if (NotInitialized) {
mRS = RenderScript.create(this);
mYuvToRGB = ScriptIntrinsicYuvToRGB.create(mRS, Element.YUV(mRS));
// Create a RS Allocation to hold NV21 data.
Type.Builder tYuv = new Type.Builder(mRS, Element.YUV(mRS));
tYuv.setX(width).setY(height).setYuvFormat(android.graphics.ImageFormat.NV21);
yuvPreviewAlloc = Allocation.createTyped(mRS, tYuv.create(), Allocation.USAGE_SCRIPT | Allocation.USAGE_IO_INPUT);
// Create a RS Allocation to hold RGBA data.
Type.Builder tRgb = new Type.Builder(mRS, Element.RGBA_8888(mRS));
tRgb.setX(width).setY(height);
rgbOutputAlloc = Allocation.createTyped(mRS, tRgb.create(), Allocation.USAGE_SCRIPT);
// Set input of ScriptIntrinsicYuvToRGB
mYuvToRGB.setInput(yuvPreviewAlloc);
}
// Use rsPreviewSurface as one of the output surface from Camera API.
// You can refer to https://github.com/googlesamples/android-HdrViewfinder/blob/master/Application/src/main/java/com/example/android/hdrviewfinder/HdrViewfinderActivity.java#L504
Surface rsPreviewSurface = yuvPreviewAlloc.getSurface();
...
// Whenever a new frame is available
// Update the yuv Allocation with a new Camera buffer without any copy.
// You can refer to https://github.com/googlesamples/android-HdrViewfinder/blob/master/Application/src/main/java/com/example/android/hdrviewfinder/ViewfinderProcessor.java#L109
yuvPreviewAlloc.ioReceive();
// The actual Yuv to Rgb conversion.
mYuvToRGB.forEach(rgbOutputAlloc);
// Copy the rgb Allocation to a Bitmap.
rgbOutputAlloc.copyTo(mBitmap);
// continue processing mBitmap.
...
When using ScriptIntrinsics I highly recommend updating to at least Jelly Bean 4.3 or higher (API 18). Things are much easier to use than in JB 4.2 (API 17).
ScriptIntrinsicYuvToRGB is not as complicated as it seems.
In particular, you don't need Type.Builder objects.
Camera preview format must be NV21!
in the onCreate()... method create the RenderScript object and the Intrinsic:
mRS = RenderScript.create(this);
mYuvToRGB = ScriptIntrinsicYuvToRGB.create(mRS, Element.U8_4(mRS));
With your cameraPreviewWidth and cameraPreviewHeight calculate the
length of the camera data byte array:
int yuvDatalength = cameraPreviewWidth*cameraPreviewHeight*3/2 ; // this is 12 bit per pixel
You need a bitmap for output:
mBitmap = Bitmap.createBitmap(cameraPreviewWidth, cameraPreviewHeight, Bitmap.Config.ARGB_8888);
Then you create the input and output allocations (here are the changes in API18+)
yuvPreviewAlloc = Allocation.createSized(mRS, Element.U8(mRS), yuvDatalength);
rgbOutputAlloc = Allocation.createFromBitmap(mRS, mBitmap); // this simple !
and set the script-input to the input allocation
mYuvToRGB.setInput(yuvPreviewAlloc); // this has to be done only once !
In the camera loop (whenever a new frame is available), copy the NV21 byte array (data[]) to the yuvPreviewAlloc, execute the script and copy the result to the bitmap:
yuvPreviewAlloc.copyFrom(data); // or yuvPreviewAlloc.copyFromUnchecked(data);
mYuvToRGB.forEach(rgbOutputAlloc);
rgbOutputAlloc.copyTo(mBitmap);
For example: on Nexus 7 (2013, JellyBean 4.3) a full HD (1920x1080) camera preview conversion takes about 7 ms.
I was able to get a different method working (one that was previously linked) by using the code here. But that was giving the red/blue color flip. So I just rearranged the U and V lines and all was OK. This is not as fast as a RenderScript though. It would be good to have a RenderScript that functioned properly. Here is the code:
static public void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0) y = 0;
if ((i & 1) == 0) {
u = (0xff & yuv420sp[uvp++]) - 128; //Just changed the order
v = (0xff & yuv420sp[uvp++]) - 128; //It was originally v then u
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0) r = 0; else if (r > 262143) r = 262143;
if (g < 0) g = 0; else if (g > 262143) g = 262143;
if (b < 0) b = 0; else if (b > 262143) b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
Anyone have a RenderScript that doesn't have color tint and/or flip problems?

Understanding Android RingDroid WAV Calculations

I have been studying the RingDroid source, trying to figure out how to draw waveforms on Android device. However, I got stuck in the section about reading the WAV file at CheapWAV.java.
public void ReadFile(File inputFile)
throws java.io.FileNotFoundException,
java.io.IOException {
super.ReadFile(inputFile);
mFileSize = (int)mInputFile.length();
if (mFileSize < 128) {
throw new java.io.IOException("File too small to parse");
}
FileInputStream stream = new FileInputStream(mInputFile);
byte[] header = new byte[12];
stream.read(header, 0, 12);
mOffset += 12;
if (header[0] != 'R' ||
header[1] != 'I' ||
header[2] != 'F' ||
header[3] != 'F' ||
header[8] != 'W' ||
header[9] != 'A' ||
header[10] != 'V' ||
header[11] != 'E') {
throw new java.io.IOException("Not a WAV file");
}
mChannels = 0;
mSampleRate = 0;
while (mOffset + 8 <= mFileSize) {
byte[] chunkHeader = new byte[8];
stream.read(chunkHeader, 0, 8);
mOffset += 8;
int chunkLen =
((0xff & chunkHeader[7]) << 24) |
((0xff & chunkHeader[6]) << 16) |
((0xff & chunkHeader[5]) << 8) |
((0xff & chunkHeader[4]));
if (chunkHeader[0] == 'f' &&
chunkHeader[1] == 'm' &&
chunkHeader[2] == 't' &&
chunkHeader[3] == ' ') {
if (chunkLen < 16 || chunkLen > 1024) {
throw new java.io.IOException(
"WAV file has bad fmt chunk");
}
byte[] fmt = new byte[chunkLen];
stream.read(fmt, 0, chunkLen);
mOffset += chunkLen;
int format =
((0xff & fmt[1]) << 8) |
((0xff & fmt[0]));
mChannels =
((0xff & fmt[3]) << 8) |
((0xff & fmt[2]));
mSampleRate =
((0xff & fmt[7]) << 24) |
((0xff & fmt[6]) << 16) |
((0xff & fmt[5]) << 8) |
((0xff & fmt[4]));
if (format != 1) {
throw new java.io.IOException(
"Unsupported WAV file encoding");
}
} else if (chunkHeader[0] == 'd' &&
chunkHeader[1] == 'a' &&
chunkHeader[2] == 't' &&
chunkHeader[3] == 'a') {
if (mChannels == 0 || mSampleRate == 0) {
throw new java.io.IOException(
"Bad WAV file: data chunk before fmt chunk");
}
int frameSamples = (mSampleRate * mChannels) / 50;
mFrameBytes = frameSamples * 2;
mNumFrames = (chunkLen + (mFrameBytes - 1)) / mFrameBytes;
mFrameOffsets = new int[mNumFrames];
mFrameLens = new int[mNumFrames];
mFrameGains = new int[mNumFrames];
byte[] oneFrame = new byte[mFrameBytes];
int i = 0;
int frameIndex = 0;
while (i < chunkLen) {
int oneFrameBytes = mFrameBytes;
if (i + oneFrameBytes > chunkLen) {
i = chunkLen - oneFrameBytes;
}
stream.read(oneFrame, 0, oneFrameBytes);
int maxGain = 0;
for (int j = 1; j < oneFrameBytes; j += 4 * mChannels) {
int val = java.lang.Math.abs(oneFrame[j]);
if (val > maxGain) {
maxGain = val;
}
}
mFrameOffsets[frameIndex] = mOffset;
mFrameLens[frameIndex] = oneFrameBytes;
mFrameGains[frameIndex] = maxGain;
frameIndex++;
mOffset += oneFrameBytes;
i += oneFrameBytes;
if (mProgressListener != null) {
boolean keepGoing = mProgressListener.reportProgress(
i * 1.0 / chunkLen);
if (!keepGoing) {
break;
}
}
}
} else {
stream.skip(chunkLen);
mOffset += chunkLen;
}
}
}
Everything seems straightforward until I reach
int frameSamples = (mSampleRate * mChannels) / 50;
mFrameBytes = frameSamples * 2;
mNumFrames = (chunkLen + (mFrameBytes - 1)) / mFrameBytes;
Q1. Where did the magic number 50 come from? Is it just assuming a frame duration of 1/50 of a second?
Q2. Why is mFrameBytes = frameSamples * 2? Is it assuming each sample is 2 bytes? But why?
for (int j = 1; j < oneFrameBytes; j += 4 * mChannels) {
int val = java.lang.Math.abs(oneFrame[j]);
if (val > maxGain) {
maxGain = val;
}
}
Q3. Why is j incrementing by 4 * mChannels? How was 4 justified?
Q4. What does frameGains actually mean? I've gone through articles/blogs such as
https://ccrma.stanford.edu/courses/422/projects/WaveFormat/2
http://blogs.msdn.com/b/dawate/archive/2009/06/23/intro-to-audio-programming-part-2-demystifying-the-wav-format.aspx
http://www.speakingcode.com/2011/12/31/primer-on-digital-audio-and-pulse-code-modulation-pcm/
But I don't see the term mentioned anywhere.
Hope someone can shed some light on this. Thank you.
Q1. Where did the magic number 50 come from? Is it just assuming a frame duration of 1/50 of a second?
A1. That calculates 1/50th of a second as a frame, so the app would have to process 50 frame buffers of audio data per second.
Q2. Why is mFrameBytes = frameSamples * 2? Is it assuming each sample is 2 bytes? But why?
A2. I'm guessing this is because he is assuming 16-bit samples.
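As a worked example (with an assumed, typical format; the WAV fmt chunk actually supplies these values), for 44.1 kHz stereo 16-bit PCM the two lines above work out to:
int sampleRate = 44100, channels = 2;            // assumed values for illustration
int frameSamples = (sampleRate * channels) / 50; // 1764 samples per 1/50 s frame (both channels)
int frameBytes = frameSamples * 2;               // 3528 bytes, at 2 bytes per 16-bit sample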
Q3. Why is j incrementing by 4 * mChannels? How was 4 justified?
A3. I think the key here is to note that it starts from offset 1, which means he is only sampling the high-order byte of each sample. The 4 is probably just an optimisation so he's only processing half the buffer (remember he's assuming 2 bytes per sample).
Q4. What does frameGains mean actually?
Well, it's exactly what it says: it's the gain of that frame (1/50th of a second). See http://en.m.wikipedia.org/wiki/Gain or Google for "audio gain".
This should also help: https://ccrma.stanford.edu/courses/422/projects/WaveFormat/

Android way to use Bitmaps from Camera.setPreviewCallback

camera.setPreviewCallback(new Camera.PreviewCallback() {
private long timestamp=0;
public synchronized void onPreviewFrame(byte[] data, Camera camera) {
Log.e("CameraTest","Time Gap = "+(System.currentTimeMillis()-timestamp));
timestamp=System.currentTimeMillis();
Bitmap mFaceBitmap = BitmapFactory.decodeByteArray(data, 0, data.length);
if (mFaceBitmap!=null) FaceDetection.calculate(mFaceBitmap);
camera.addCallbackBuffer(data);
return;
}
});
I have a camera view, and in front of it a simple View (where I can draw something).
I'd like to draw on the front View whenever I find a human face.
But mFaceBitmap is always null. Why?
If this is a bad idea, how can I do this better?
When you set up the camera you will need to set the preview size and the preview format. Here is some sample code to give the rough idea:
int previewFormat = 0;
for (int format : parameters.getSupportedPreviewFormats()) {
if (format == FORMAT_NV21) {
previewFormat = FORMAT_NV21;
} else if (previewFormat == 0 && (format == FORMAT_JPEG || format == FORMAT_RGB_565)) {
previewFormat = format;
}
}
// TODO: Iterate on supported preview sizes and pick best one
parameters.setPreviewSize(previewSize.width, previewSize.height);
if (previewFormat != 0) {
parameters.setPreviewFormat(previewFormat);
} else {
// Error on unsupported format
}
Now in the callback you can do something like:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
Bitmap bitmap;
if (previewFormat == FORMAT_NV21) {
int[] previewPixels = new int[previewSize.width * previewSize.height];
decodeYUV420SP(previewPixels, data, previewSize.width, previewSize.height);
bitmap = Bitmap.createBitmap(previewPixels, previewSize.width, previewSize.height, Bitmap.Config.RGB_565);
} else if (previewFormat == FORMAT_JPEG || previewFormat == FORMAT_RGB_565) {
// RGB565 and JPEG
BitmapFactory.Options opts = new BitmapFactory.Options();
opts.inDither = true;
opts.inPreferredConfig = Bitmap.Config.RGB_565;
bitmap = BitmapFactory.decodeByteArray(data, 0, data.length, opts);
}
}
And finally, the conversion
static void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0)
y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0)
r = 0;
else if (r > 262143)
r = 262143;
if (g < 0)
g = 0;
else if (g > 262143)
g = 262143;
if (b < 0)
b = 0;
else if (b > 262143)
b = 262143;
rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
}
}
}
You can't use BitmapFactory.decodeByteArray to convert a camera's preview output into a bitmap, unfortunately.
decodeByteArray is designed for converting JPEG/PNG/etc. images into bitmaps, and it doesn't have any way of knowing what the data in the preview callback looks like, because it's a simple raw array of pixel values with no identifying header.
You have to do the conversion yourself. There are many ways to do this, of various degrees of efficiency - I'll write out the simplest one here, but it's also probably the slowest.
The data byte array from the camera is encoded in some particular pixel format, which is specified by Camera.Parameters.setPreviewFormat. If you haven't called this, the default format is NV21. NV21 is guaranteed to work on all Android devices; on Android versions >= 3.0, the YV12 format is also guaranteed to work.
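For completeness, a minimal sketch of requesting the format explicitly (assuming a camera instance you already have, plus the standard android.hardware.Camera and android.graphics.ImageFormat imports):
Camera.Parameters params = camera.getParameters();
params.setPreviewFormat(ImageFormat.NV21); // request the NV21 layout described below
camera.setParameters(params);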
Both NV21 and YV12 are YUV formats, meaning the color is encoded as a luminance (brightness) channel and two chroma (color) channels. The functions for setting pixel values on a Bitmap (primarily setPixels) require information in the RGB color space instead, so a conversion is required. In addition, both NV21 and YV12 subsample the chroma channels - if you have a 640x480 image, for example, there will be 640x480 pixels in the luminance channel, but only 320x240 pixels in each of the two chroma channels.
This means you need to create a new int[] array of the right size, loop over the byte[] data array, collect the Y, U, and V values for each pixel, convert them to RGB, write them into the int[] array, and then call setPixels on your destination bitmap. The color conversion matrix you need is the JPEG YCbCr->RGB matrix, which you can find on Wikipedia, for example. You can find out about the layout of NV21 or YV12 at fourcc.org, for example.
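Glued together, a minimal sketch of that path (assuming previewWidth/previewHeight and a helper like the decodeYUV420SP shown above) looks like:
int[] argb = new int[previewWidth * previewHeight];
decodeYUV420SP(argb, data, previewWidth, previewHeight); // NV21 -> packed ARGB ints
Bitmap bmp = Bitmap.createBitmap(previewWidth, previewHeight, Bitmap.Config.ARGB_8888);
bmp.setPixels(argb, 0, previewWidth, 0, 0, previewWidth, previewHeight);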
If you really don't want to mess with all that, you can also use the YuvImage class, albeit in a roundabout way. You can construct a YuvImage instance from the preview data, as long as you're using the NV21 format, and then save a JPEG from it into a ByteArrayOutputStream. You can then get the byte[] from the stream and decode it into a bitmap using BitmapFactory.decodeByteArray. This is a completely unnecessary round trip to JPEG and back, so it's quite inefficient and can cause quality loss, but it only requires a few lines of code.
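A sketch of that YuvImage round trip (NV21 preview data and the standard android.graphics / java.io imports assumed):
YuvImage yuv = new YuvImage(data, ImageFormat.NV21, previewWidth, previewHeight, null);
ByteArrayOutputStream jpegStream = new ByteArrayOutputStream();
yuv.compressToJpeg(new Rect(0, 0, previewWidth, previewHeight), 90, jpegStream); // quality 90
byte[] jpegBytes = jpegStream.toByteArray();
Bitmap bmp = BitmapFactory.decodeByteArray(jpegBytes, 0, jpegBytes.length);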
In the latest versions of Android, you can also use RenderScript to do this conversion efficiently. You'll need to copy the data into an Allocation, and then use the YUV-to-RGB script intrinsic to do the conversion.
Finally, you can pass the data and destination bitmap into JNI code, where you can access the Bitmap directly, and write the conversion function there in C or C++. This requires a lot of scaffolding, but is very efficient.

Creating a new Bitmap with Pixel Data in JNI?

I have the code below to create a Bitmap (just a black/gray image) in JNI with the 'ARGB_8888' configuration. But when I dump the contents of the Bitmap in the Java code, I can see only the configuration, not the pixel data.
JNI Code
// Image Details
int imgWidth = 128;
int imgHeight = 128;
int numPix = imgWidth * imgHeight;
// Creating Bitmap Config class
jclass bmpCfgCls = env->FindClass("android/graphics/Bitmap$Config");
jmethodID bmpClsValueOfMid = env->GetStaticMethodID(bmpCfgCls, "valueOf", "(Ljava/lang/String;)Landroid/graphics/Bitmap$Config;");
jobject jBmpCfg = env->CallStaticObjectMethod(bmpCfgCls, bmpClsValueOfMid, env->NewStringUTF("ARGB_8888"));
// Creating a Bitmap Class
jclass bmpCls = env->FindClass("android/graphics/Bitmap");
jmethodID createBitmapMid = env->GetStaticMethodID(bmpCls, "createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
jBmpObj = env->CallStaticObjectMethod(bmpCls, createBitmapMid, imgWidth, imgHeight, jBmpCfg);
// Creating Pixel Data
int triplicateLen = numPix * 4;
char *tripPixData = (char*)malloc(triplicateLen);
for (int lc = 0; lc < triplicateLen; lc++)
{
// Gray / Black Image
if (0 == (lc%4))
tripPixData[lc] = 0x7F; // Alpha
else
tripPixData[lc] = 0x00; // RGB
}
// Setting Pixels in Bitmap
jByteArr = env->NewByteArray(triplicateLen);
env->SetByteArrayRegion(jByteArr, 0, triplicateLen, (jbyte*)tripPixData);
jmethodID setPixelsMid = env->GetMethodID(bmpCls, "setPixels", "([IIIIIII)V");
env->CallVoidMethod(jBmpObj, setPixelsMid, (jintArray)jByteArr, 0, imgWidth, 0, 0, imgWidth, imgHeight);
free(tripPixData);
// Return BitMap Object
return jBmpObj;
In JAVA (Output)
// Checking the Configuration / Image Details
jBmpObj.getWidth() - 128
jBmpObj.getHeight() - 128
jBmpObj.getRowBytes() - 512
jBmpObj.getConfig() - ARGB 8888
// Getting Pixel Data
imgPixs = new int[jBmpObj.getWidth() * jBmpObj.getHeight()];
jBmpObj.getPixels(imgPixs, 0, jBmpObj.getWidth(), 0, 0, jBmpObj.getWidth(), jBmpObj.getHeight());
// Running a Loop on the imgPixs
imgPixs[<0 - imgPixs.length>] - 0 (every pixel)
I used the same concept to create a Bitmap in the Java code, and it works fine (I'm even able to see the image). But I want the logic to be in the JNI part and not in the Java code, so I tried the above logic and it failed when setting the pixel data.
Any input on fixing this issue would be really helpful.
Full working example:
jclass bitmapConfig = jniEnv->FindClass("android/graphics/Bitmap$Config");
jfieldID rgba8888FieldID = jniEnv->GetStaticFieldID(bitmapConfig, "ARGB_8888", "Landroid/graphics/Bitmap$Config;");
jobject rgba8888Obj = jniEnv->GetStaticObjectField(bitmapConfig, rgba8888FieldID);
jclass bitmapClass = jniEnv->FindClass("android/graphics/Bitmap");
jmethodID createBitmapMethodID = jniEnv->GetStaticMethodID(bitmapClass,"createBitmap", "(IILandroid/graphics/Bitmap$Config;)Landroid/graphics/Bitmap;");
jobject bitmapObj = jniEnv->CallStaticObjectMethod(bitmapClass, createBitmapMethodID, _width, _height, rgba8888Obj);
jintArray pixels = jniEnv->NewIntArray(_width * _height);
for (int i = 0; i < _width * _height; i++)
{
unsigned char red = bitmap[i*4];
unsigned char green = bitmap[i*4 + 1];
unsigned char blue = bitmap[i*4 + 2];
unsigned char alpha = bitmap[i*4 + 3];
int currentPixel = (alpha << 24) | (red << 16) | (green << 8) | (blue);
jniEnv->SetIntArrayRegion(pixels, i, 1, &currentPixel);
}
jmethodID setPixelsMid = jniEnv->GetMethodID(bitmapClass, "setPixels", "([IIIIIII)V");
jniEnv->CallVoidMethod(bitmapObj, setPixelsMid, pixels, 0, _width, 0, 0, _width, _height);
where bitmap is unsigned char*.
You cannot cast byte[] to int[] in Java, therefore you cannot cast it in JNI. But you can cast char* to int*, so you can simply use your tripPixData to fill a new jintArray.
In Android each pixel is represented as 0xFFFFFFFF, i.e. ARGB; the most significant 8 bits hold the alpha component.
From your snippet, where are you getting the source image data? I have solved this issue by using the following code; I hope this will help you.
// Creating Pixel Data
unsigned char* rawData = //your raw data
// Note: each r, g & b component should come as 8-bit data if it is an RGB image;
// if it is monochrome you can use rawData directly
int triplicateLen = imgheight * imgwidth;
int *tripPixData = (int*) malloc(triplicateLen * sizeof(int));
if(rgb){
for (int lc = 0; lc < triplicateLen ; lc++){
tripPixData [lc] = (0xFF << 24) | (r[lc] << 16) | (g[lc] << 8) | b[lc];
}
}else{
for (int lc = 0; lc < triplicateLen ; lc++){
tripPixData [lc] = (0xFF << 24) | (rawData [lc] << 16) | (rawData [lc] << 8) | rawData [lc];
}
}
