How to overwrite onPreviewFrame data with static YUV frame? - android

My application overrides the onPreviewFrame callback to pass the current camera frame to a webrtc native function. This works perfectly, however I want to be able to switch to sending a static frame instead of video, if that option has been selected in my app.
So far I have created a YUV NV21 image, which I am storing in the assets dir. All attempts to pass that frame down to the native function have resulted in purple/green stripes rather than the actual image.
This is what I have so far:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
previewBufferLock.lock();
if (mFrameProvider.isEnabled()) {
mFrameProvider.overwriteWithFrame(data, expectedFrameSize);
}
if (isCaptureRunning) {
if (data.length == expectedFrameSize) {
ProvideCameraFrame(data, expectedFrameSize, context);
cameraUtils.addCallbackBuffer(camera, data);
}
}
previewBufferLock.unlock();
}
@Override
public byte[] overwriteWithPreviewFrame(byte[] data, int expectedFrameSize) {
if (mFrameData == null) {
loadPreviewFrame();
}
for (int i=0; i < expectedFrameSize; i++) {
if (i < mFrameData.length) {
data[i] = mFrameData[i];
}
}
return data;
}
And
private void loadPreviewFrame() {
try {
InputStream open = mContext.getResources().getAssets().open(PREVIEW_FRAME_FILE);
mFrameData = IOUtils.toByteArray(open);
open.close();
} catch (Exception e) {
Log.e("", "", e);
}
}
I have also tried converting the image to a bitmap. So the question is: how can I open a YUV frame from assets and convert it into a suitable format to pass to the native methods?
The current attempt results in the striped output described above.

Right after a long fight with the Android API I have managed to get this working.
There were two issues that caused the green/purple output:
Loss of data: the generated YUV frame was larger than the original preview frame at the same resolution, so the data being passed down to the native code was missing around 30% of its image data.
Wrong resolution: the native code required the resolution of the preview frame and not the camera's default resolution.
Below is a working solution for anyone who wishes to add a static frame. Updated code:
@Override
public byte[] getPreviewFrameData(int width, int height) {
if (mPreviewFrameData == null) {
loadPreviewFrame(width, height);
}
return mPreviewFrameData;
}
private void loadPreviewFrame(int width, int height) {
try {
Bitmap previewImage = BitmapFactory.decodeResource(mContext.getResources(), R.drawable.frame);
Bitmap resizedPreviewImage = Bitmap.createScaledBitmap(previewImage, width, height, false);
BitmapConverter bitmapConverter = new BitmapConverter();
mPreviewFrameData = bitmapConverter.convertToNV21(resizedPreviewImage);
} catch (Exception e) {
Log.e("DisabledCameraFrameProvider", "Failed to loadPreviewFrame");
}
}
class BitmapConverter {
byte [] convertToNV21(Bitmap bitmap) {
int inputWidth = bitmap.getWidth();
int inputHeight = bitmap.getHeight();
int [] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte [] yuv = new byte[inputWidth*inputHeight*3/2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
bitmap.recycle();
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff);
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
}
Then, finally, in your callback:
public void onPreviewFrame(byte[] data, Camera camera) {
byte[] bytes = data;
if (!mProvider.isVideoEnabled()) {
Camera.Size previewSize = camera.getParameters().getPreviewSize();
bytes = mProvider.getPreviewFrameData(previewSize.width, previewSize.height);
}
ProvideCameraFrame(bytes, bytes.length, context);
}
The key was to scale the image to the camera preview size and convert the image to YUV colour space.
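One extra check that would have caught the first issue: for NV21 the buffer the native code expects is width * height * 3/2 bytes at the preview size, so the generated frame can be validated before it is handed over. A minimal sketch, assuming you still have the camera instance and the mProvider field from the callback above:
// Sketch: verify the static NV21 frame matches the size the preview buffer expects.
// ImageFormat.getBitsPerPixel(ImageFormat.NV21) returns 12, so this is width * height * 3 / 2.
Camera.Parameters params = camera.getParameters();
Camera.Size preview = params.getPreviewSize();
int expectedFrameSize = preview.width * preview.height
        * ImageFormat.getBitsPerPixel(ImageFormat.NV21) / 8;
byte[] staticFrame = mProvider.getPreviewFrameData(preview.width, preview.height);
if (staticFrame.length != expectedFrameSize) {
    Log.w("StaticFrame", "Frame is " + staticFrame.length
            + " bytes but the preview buffer expects " + expectedFrameSize);
}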

Related

Share wikitude AR using openvidu

I'm working on a video conference app using openvidu. We are trying to include a wikitude AR session in the call.
The problem is that both of them require access to the camera, so I have the following scenario: if I instantiate the local participant video first, I can't start the wikitude AR session because the video doesn't load. If I instantiate the wikitude session first, the other participants in the call don't see the device video.
I was able to create a custom video capturer for openvidu that imitates the camera. Every frame has to be sent to it for it to work.
package org.webrtc;
import android.content.Context;
import android.graphics.Bitmap;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicReference;
public class CustomVideoCapturer implements VideoCapturer {
private final static String TAG = "FileVideoCapturer";
//private final FileVideoCapturer.VideoReader videoReader;
private final Timer timer = new Timer();
private CapturerObserver capturerObserver;
private AtomicReference<Bitmap> image = new AtomicReference<Bitmap>();
private final TimerTask tickTask = new TimerTask() {
@Override
public void run() {
tick();
}
};
public CustomVideoCapturer() {
}
public void tick() {
Bitmap frame = image.get();
if (frame != null && !frame.isRecycled()) {
NV21Buffer nv21Buffer = new NV21Buffer(getNV21(frame),frame.getWidth(),frame.getHeight(), null);
VideoFrame videoFrame = new VideoFrame(nv21Buffer, 0, System.nanoTime());
capturerObserver.onFrameCaptured(videoFrame);
}
}
byte [] getNV21(Bitmap image) {
int [] argb = new int[image.getWidth() * image.getHeight()];
image.getPixels(argb, 0, image.getWidth(), 0, 0, image.getWidth(), image.getHeight());
byte [] yuv = new byte[image.getWidth()*image.getHeight()*3/2];
encodeYUV420SP(yuv, argb, image.getWidth(), image.getHeight());
image.recycle();
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
public void sendFrame(Bitmap bitmap) {
image.set(bitmap);
}
@Override
public void initialize(SurfaceTextureHelper surfaceTextureHelper, Context applicationContext,
CapturerObserver capturerObserver) {
this.capturerObserver = capturerObserver;
}
@Override
public void startCapture(int width, int height, int framerate) {
//timer.schedule(tickTask, 0, 1000 / framerate);
threadCV().start();
}
Thread threadCV() {
return new Thread() {
@Override
public void run() {
while (true) {
if (image.get() != null) {
tick();
}
try {
Thread.sleep(10);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
};
}
@Override
public void stopCapture() throws InterruptedException {
timer.cancel();
}
@Override
public void changeCaptureFormat(int width, int height, int framerate) {
// Empty on purpose
}
@Override
public void dispose() {
//videoReader.close();
}
@Override
public boolean isScreencast() {
return false;
}
private interface VideoReader {
VideoFrame getNextFrame();
void close();
}
/**
* Read video data from file for the .y4m container.
*/
}
On the local participant I then use this function to send the frame:
public void sendFrame(Bitmap frame) {
customVideoCapturer.sendFrame(frame);
}
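For context, a capturer like this is normally handed to a WebRTC VideoSource when the local stream is set up. The exact wiring depends on the libwebrtc version bundled with openvidu, so the following is only a sketch; eglBaseContext, peerConnectionFactory and applicationContext are assumed to come from your existing session setup:
// Sketch: plugging the custom capturer into a WebRTC VideoSource
// (API names from recent libwebrtc for Android; adjust to your bundled version).
CustomVideoCapturer capturer = new CustomVideoCapturer();
SurfaceTextureHelper helper =
        SurfaceTextureHelper.create("CustomCapturerThread", eglBaseContext);
VideoSource videoSource = peerConnectionFactory.createVideoSource(capturer.isScreencast());
capturer.initialize(helper, applicationContext, videoSource.getCapturerObserver());
capturer.startCapture(640, 480, 30);
VideoTrack localTrack = peerConnectionFactory.createVideoTrack("ARDAMSv0", videoSource);
// Later, every AR frame you manage to grab is pushed in as a Bitmap:
// capturer.sendFrame(bitmap);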
But I wasn't able to take the frames from the wikitude camera. Is there a way to access the frames and resend them?
As of the Native API SDK, version 9.10.0, according to the answer from wikitude support
https://support.wikitude.com/support/discussions/topics/5000096719?page=1 , a custom plugin has to be created to access the camera frames:
https://www.wikitude.com/external/doc/documentation/latest/androidnative/pluginsapi.html#plugins-api
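Assuming the plugin delivers the raw camera frame as an NV21 byte[] (the usual Android camera format), it still has to reach sendFrame(Bitmap). A rough sketch of that last step is below; the JPEG round trip through YuvImage is wasteful and only meant to show the data path, so a stride-aware copy straight into an NV21Buffer would be preferable if the capturer were changed to accept bytes:
// Sketch: turn an NV21 byte[] delivered by a camera-frame plugin back into a Bitmap
// so it can be fed to customVideoCapturer.sendFrame(bitmap).
Bitmap nv21ToBitmap(byte[] nv21, int width, int height) {
    YuvImage yuvImage = new YuvImage(nv21, ImageFormat.NV21, width, height, null);
    ByteArrayOutputStream out = new ByteArrayOutputStream();
    yuvImage.compressToJpeg(new Rect(0, 0, width, height), 90, out);
    byte[] jpeg = out.toByteArray();
    return BitmapFactory.decodeByteArray(jpeg, 0, jpeg.length);
}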

Unable to change ARGB to YUV420Planar

I'm attempting to use MediaCodec and MediaMuxer to turn a series of JPEGs into an mp4.
No matter what I do, I always get a green, static-filled screen as output in the MP4.
Code follows:
public class AvcEncoder
{
public bool CanEncode = true;
MediaCodec codec;
MediaMuxer muxer;
MediaFormat format;
public AvcEncoder()
{
codec = MediaCodec.CreateEncoderByType("video/avc");
format = MediaFormat.CreateVideoFormat("video/avc", 720, 480);
format.SetInteger(MediaFormat.KeyBitRate, 700000);
format.SetInteger(MediaFormat.KeyFrameRate, 10);
format.SetInteger(MediaFormat.KeyColorFormat, (int)Android.Media.MediaCodecCapabilities.Formatyuv420planar);
format.SetInteger(MediaFormat.KeyIFrameInterval, 5);
codec.Configure(format, null, null, MediaCodecConfigFlags.Encode);
codec.Start();
Java.IO.File f = new Java.IO.File(Android.OS.Environment.ExternalStorageDirectory, "Parkingdom");
if (!f.Exists())
{
f.Mkdirs();
}
muxer = new MediaMuxer(f.ToString() + "/test.mp4", MuxerOutputType.Mpeg4);
}
public void EncodeFrame(Bitmap image)
{
int mWidth = image.Width;
int mHeight = image.Height;
int[] mIntArray = new int[mWidth * mHeight];
// Copy pixel data from the Bitmap into the 'intArray' array
image.GetPixels(mIntArray, 0, mWidth, 0, 0, mWidth, mHeight);
byte[] byteArray = new byte[mWidth * mHeight * 3 / 2];
// Call to encoding function : convert intArray to Yuv Binary data
EncodeYUV420P(byteArray, mIntArray, mWidth, mHeight);
using (var stream = new MemoryStream())
{
image.Compress(Bitmap.CompressFormat.Png, 100, stream);
byteArray = stream.ToArray();
}
int inputBufferIndex = codec.DequeueInputBuffer(-1);
if (inputBufferIndex >= 0)
{
ByteBuffer buffer = codec.GetInputBuffer(inputBufferIndex);
buffer.Clear();
buffer.Put(byteArray);
codec.QueueInputBuffer(inputBufferIndex, 0, byteArray.Length, 0, 0);
}
}
public void SaveMp4()
{
CanEncode = false;
bool running = true;
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
int track = -1;
while (running)
{
int index = codec.DequeueOutputBuffer(bufferInfo, 10000);
if (index == (int)MediaCodecInfoState.OutputFormatChanged)
{
MediaFormat format = codec.OutputFormat;
track = muxer.AddTrack(format);
muxer.Start();
}
else if (index == (int)MediaCodecInfoState.TryAgainLater)
{
break;
}
else if (index >= 0)
{
if ((bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
{
bufferInfo.Size = 0;
}
if (track != -1)
{
ByteBuffer outBuffer = codec.GetOutputBuffer(index);
outBuffer.Position(bufferInfo.Offset);
outBuffer.Limit(bufferInfo.Offset + bufferInfo.Size);
muxer.WriteSampleData(track, outBuffer, bufferInfo);
codec.ReleaseOutputBuffer(index, false);
}
}
}
codec.Stop();
codec.Release();
muxer.Stop();
muxer.Release();
CanEncode = true;
}
void EncodeYUV420P(byte[] yuv420p, int[] argb, int width, int height)
{
int frameSize = width * height;
int chromasize = frameSize / 4;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + chromasize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++)
{
for (int i = 0; i < width; i++)
{
a = (int)(argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420p[yIndex++] = (byte)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0)
{
yuv420p[uIndex++] = (byte)((U < 0) ? 0 : ((U > 255) ? 255 : U));
yuv420p[vIndex++] = (byte)((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
index++;
}
}
}
}
Each time a new JPEG is generated, "EncodeFrame" is called, which is supposed to convert it into YUV420Planar format for the media codec. The codec I'm testing with doesn't support semi-planar.
In case someone comes across this later: I changed EncodeFrame to use a Surface instead and just used DrawBitmap().
It's slower than the byte copy but works for my purposes.
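For anyone taking the same route, the Surface-based setup looks roughly like the sketch below in plain Android Java (the Xamarin bindings mirror these names); image is an assumed Bitmap variable, and this is not the exact code used above:
// Sketch: feed frames to the encoder through an input Surface instead of YUV byte buffers.
MediaFormat format = MediaFormat.createVideoFormat("video/avc", 720, 480);
format.setInteger(MediaFormat.KEY_BIT_RATE, 700000);
format.setInteger(MediaFormat.KEY_FRAME_RATE, 10);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
        MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
MediaCodec codec = MediaCodec.createEncoderByType("video/avc");
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
Surface inputSurface = codec.createInputSurface(); // valid only between configure() and start()
codec.start();
// Per frame: draw the bitmap onto the surface; the encoder picks it up as input.
Canvas canvas = inputSurface.lockCanvas(null);
canvas.drawBitmap(image, 0, 0, null);
inputSurface.unlockCanvasAndPost(canvas);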

Android JavaCV + Camera2

Trying to record video from the camera using JavaCV:
// recoder settings:
private int imageWidth = 320;
private int imageHeight = 240;
private int frameRate = 30;
recorder = new FFmpegFrameRecorder(ffmpeg_link, imageWidth, imageHeight, 1);
recorder.setFormat("mp4");
recorder.setFrameRate(frameRate);
// frame settings:
IplImage yuvIplimage = null;
yuvIplimage = IplImage.create(320, 320, IPL_DEPTH_16U, 1); //32 not supported
//image reader:
private ImageReader mImageReader;
mImageReader = ImageReader.newInstance(320, 320, ImageFormat.YUV_420_888, 10);
mImageReader.setOnImageAvailableListener(
mOnImageAvailableListener, mBackgroundHandler);
private final ImageReader.OnImageAvailableListener mOnImageAvailableListener
= new ImageReader.OnImageAvailableListener() {
@Override
public void onImageAvailable(ImageReader reader) {
Image image = reader.acquireNextImage();// acquireLatestImage(); - also tried
if (image == null)
return;
final ByteBuffer buffer = image.getPlanes()[0].getBuffer();
byte[] bytes = new byte[buffer.remaining()];
buffer.get(bytes, 0, bytes.length);
if (yuvIplimage != null ) {
// OPTION 1
yuvIplimage.getByteBuffer().put(convertYUV420ToNV21(image));
// OPTION 2
//yuvIplimage.getByteBuffer().put(decodeYUV420SP(bytes,320,320));
try {
if (started) {
recorder.record(yuvIplimage);
}
} catch (Exception e) {
e.printStackTrace();
}
}
image.close();
}
};
Option 1 is to convert the image to NV21 using this code:
private byte[] convertYUV420ToNV21(Image imgYUV420) {
byte[] rez;
ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
int buffer0_size = buffer0.remaining();
int buffer2_size = buffer2.remaining();
rez = new byte[buffer0_size + buffer2_size];
buffer0.get(rez, 0, buffer0_size);
buffer2.get(rez, buffer0_size, buffer2_size);
return rez;
}
Option 2 is to convert to RGB, if I understand correctly:
public byte[] decodeYUV420SP( byte[] yuv420sp, int width, int height) {
final int frameSize = width * height;
byte rgb[]=new byte[width*height];
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0) y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0) r = 0; else if (r > 262143) r = 262143;
if (g < 0) g = 0; else if (g > 262143) g = 262143;
if (b < 0) b = 0; else if (b > 262143) b = 262143;
rgb[yp] = (byte) (0xff000000 | ((r << 6) & 0xff0000)
| ((g >> 2) & 0xff00) | ((b >> 10) & 0xff));
}
}
return rgb; }
It also looks incorrect. Which is the correct way to convert a camera2 Image to IplImage?
And is it possible to do it on the fly?
If the recorder requires NV21, then converting the image to that instead of RGB is likely the fastest option.
But why don't you just use android.media.MediaRecorder? It's much more efficient and can use the hardware encoders.
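Roughly, that route looks like this (a sketch only; the output path is just an example):
// Sketch: hardware-encoded recording via MediaRecorder and camera2.
// The recorder hands you a Surface that you add as a target of the capture session.
MediaRecorder recorder = new MediaRecorder();
recorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
recorder.setOutputFile("/sdcard/test.mp4");              // example path only
recorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
recorder.setVideoSize(320, 240);
recorder.setVideoFrameRate(30);
recorder.prepare();
Surface recorderSurface = recorder.getSurface();
// Add recorderSurface to the CaptureRequest and CameraCaptureSession targets,
// then call recorder.start() once the session is configured.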
But if you need to stick with ffmpeg, your first option is incorrect for many devices. In addition, make sure you remove that buffer.get call earlier - it'll make the rest of the reads from plane 0 not work right, which may be your current problem. Once you read plane 0 once, .remaining() will return 0.
The YUV image has 3 planes, and unless you've checked that the underlying format is actually NV21, you shouldn't blindly assume that, or assume that the row stride is equal to the width.
To be safe, you need to look at both row and pixel strides when copying the three planes into the semiplanar byte[].
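A stride-aware copy looks roughly like this sketch; it assumes the Image came from an ImageReader in YUV_420_888 format and builds the NV21 buffer from each plane's row and pixel stride instead of assuming they equal the width:
// Sketch: YUV_420_888 Image -> NV21 byte[], respecting row and pixel strides.
byte[] yuv420888ToNV21(Image image) {
    int width = image.getWidth();
    int height = image.getHeight();
    byte[] nv21 = new byte[width * height * 3 / 2];
    // Y plane: copy row by row, skipping any padding at the end of each row.
    Image.Plane yPlane = image.getPlanes()[0];
    ByteBuffer yBuf = yPlane.getBuffer();
    int yRowStride = yPlane.getRowStride();
    int pos = 0;
    for (int row = 0; row < height; row++) {
        yBuf.position(row * yRowStride);
        yBuf.get(nv21, pos, width);
        pos += width;
    }
    // Chroma planes: interleave V then U (NV21 order), honouring the pixel stride.
    Image.Plane uPlane = image.getPlanes()[1];
    Image.Plane vPlane = image.getPlanes()[2];
    ByteBuffer uBuf = uPlane.getBuffer();
    ByteBuffer vBuf = vPlane.getBuffer();
    int uvRowStride = uPlane.getRowStride();
    int uvPixelStride = uPlane.getPixelStride();
    for (int row = 0; row < height / 2; row++) {
        for (int col = 0; col < width / 2; col++) {
            int offset = row * uvRowStride + col * uvPixelStride;
            nv21[pos++] = vBuf.get(offset);
            nv21[pos++] = uBuf.get(offset);
        }
    }
    return nv21;
}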
If you're willing to move your processing over to C++ you could do the conversion like
cv::cvtColor((cv::_InputArray)mNV, (cv::_OutputArray)rgba, CV_YUV2RGBA_NV12, 0);
This will store the image as an RGBA mat and you can save it as normal from there.

bitmap to yuv , video recorded has only green pixels

I am trying to convert a bitmap to YUV and record this YUV with the FFmpeg frame recorder.
I am getting video output with only green pixels, though when I check the properties of this video it shows the expected frame rate and resolution.
The YUV encoding part is correct, but I feel I am making a mistake somewhere else, most likely in returning the YUV bytes to the recording part (getByte(byte[] yuv)), because only there is the yuv.length printed in the console 0; all the other methods print a large value.
Kindly help.
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
directory.mkdirs();
addListenerOnButton();
play=(Button)findViewById(R.id.buttonplay);
stop=(Button)findViewById(R.id.buttonstop);
record=(Button)findViewById(R.id.buttonstart);
stop.setEnabled(false);
play.setEnabled(false);
record.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
startRecording();
getByte(new byte[]{});
}
});
stop.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
stopRecording();
}
});
play.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) throws IllegalArgumentException, SecurityException, IllegalStateException {
Intent intent = new Intent(Intent.ACTION_VIEW, Uri.parse(String.valueOf(asmileys)));
intent.setDataAndType(Uri.parse(String.valueOf(asmileys)), "video/mp4");
startActivity(intent);
Toast.makeText(getApplicationContext(), "Playing Video", Toast.LENGTH_LONG).show();
}
});
}
......//......
public void getByte(byte[] yuv) {
getNV21(640, 480, bitmap);
System.out.println(yuv.length + " ");
if (audioRecord == null || audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
startTime = System.currentTimeMillis();
return;
}
if (RECORD_LENGTH > 0) {
int i = imagesIndex++ % images.length;
yuvimage = images[i];
timestamps[i] = 1000 * (System.currentTimeMillis() - startTime);
}
/* get video data */
if (yuvimage != null && recording) {
((ByteBuffer) yuvimage.image[0].position(0)).put(yuv);
if (RECORD_LENGTH <= 0) {
try {
long t = 1000 * (System.currentTimeMillis() - startTime);
if (t > recorder.getTimestamp()) {
recorder.setTimestamp(t);
}
recorder.record(yuvimage);
} catch (FFmpegFrameRecorder.Exception e) {
e.printStackTrace();
}
}
}
}
public byte [] getNV21(int inputWidth, int inputHeight, Bitmap bitmap) {
int[] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
bitmap.recycle();
System.out.println(yuv.length + " ");
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize;
System.out.println(yuv420sp.length + " " + frameSize);
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
index++;
}
}
}
.....//.....
public void addListenerOnButton() {
image = (ImageView) findViewById(R.id.imageView);
image.setDrawingCacheEnabled(true);
image.buildDrawingCache();
bitmap = image.getDrawingCache();
System.out.println(bitmap.getByteCount() + " " );
button = (Button) findViewById(R.id.btn1);
button.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View view){
image.setImageResource(R.drawable.image1);
}
});
......//......
EDIT 1:
I made a few changes to the above code:
record.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
startRecording();
getByte();
}
});
.....//....
public void getbyte() {
byte[] yuv = getNV21(640, 480, bitmap);
So now in the console I get the same yuv length in this method as the yuv length from the getNV21 method.
But now I am getting half the screen black and half green (black above and green below) in the recorded video.
If I add these lines to the onCreate method:
image = (ImageView) findViewById(R.id.imageView);
image.setDrawingCacheEnabled(true);
image.buildDrawingCache();
bitmap = image.getDrawingCache();
I do get distorted frames (frames are 1/4 of the displayed image, with colours mixed up here and there) in the video.
All I am trying to learn is the image processing and the flow of byte[] from one method to another, but I am still a noob.
Kindly help!

Android JavaCV create IplImage from Camera to use with ColorHistogram

I am using JavaCV in Android.
In my code, I have created an ImageComparator (a class from the OpenCV2 Cookbook examples:
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4) object and use that object to compare images. If I use a file from the SD card the comparator works.
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile,CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
comparator = new ImageComparator(reference);
But when I create the IplImage from the camera preview it does not work. I am getting the following exception during the comparison "score" calculation.
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale.
int f = SUBSAMPLING_FACTOR;
if (grayImage == null || grayImage.width() != width / f
|| grayImage.height() != height / f) {
grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}
int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
int dataLine = y * dataStride;
int imageLine = y * imageStride;
for (int x = 0; x < imageWidth; x++) {
imageBuffer.put(imageLine + x, data[dataLine + f * x]);
}
}
How can I create a color IplImage from the camera to use with ImageComparator?
The below code seems to be working fine.
public void onPreviewFrame(final byte[] data, final Camera camera) {
try {
Camera.Size size = camera.getParameters().getPreviewSize();
processImage(data, size.width, size.height);
camera.addCallbackBuffer(data);
} catch (RuntimeException e) {
// The camera has probably just been released, ignore.
Log.d("Exception", " " + e);
}
}
protected void processImage(byte[] data, int width, int height) {
score.clear();
// First, downsample our image
int f = SUBSAMPLING_FACTOR;
IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
int[] _temp = new int[width * height];
if (_4image != null) {
decodeYUV420SP(_temp, data, width, height);
_4image.getIntBuffer().put(_temp);
}
//bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
//bitmap.copyPixelsFromBuffer(_4image.getByteBuffer());
Log.d("CompareAndroid", "processImage");
int imageSize = _4image.width() * _4image.height();
Iterator<ImageComparator> iterator = reference_List.iterator();
// Compute histogram match and normalize by image size.
// 1 means perfect match.
while(iterator.hasNext()){
score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
}
Log.d("CompareImages", "Score Size "+score.size());
postInvalidate();
}
This code seems to be working fine.
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width,
int height) {
int frameSize = width * height;
for (int j = 0, yp = 0; j < height; j++) {
int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
for (int i = 0; i < width; i++, yp++) {
int y = (0xff & ((int) yuv420sp[yp])) - 16;
if (y < 0)
y = 0;
if ((i & 1) == 0) {
v = (0xff & yuv420sp[uvp++]) - 128;
u = (0xff & yuv420sp[uvp++]) - 128;
}
int y1192 = 1192 * y;
int r = (y1192 + 1634 * v);
int g = (y1192 - 833 * v - 400 * u);
int b = (y1192 + 2066 * u);
if (r < 0)
r = 0;
else if (r > 262143)
r = 262143;
if (g < 0)
g = 0;
else if (g > 262143)
g = 262143;
if (b < 0)
b = 0;
else if (b > 262143)
b = 262143;
rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
}
}
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 2);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
