Unable to change ARGB to YUV420Planar - android

I'm attempting to use MediaCodec and MediaMuxer to turn a series of JPEGs into an MP4.
No matter what I do, the resulting MP4 is always green static.
Code follows:
public class AvcEncoder
{
    public bool CanEncode = true;
    MediaCodec codec;
    MediaMuxer muxer;
    MediaFormat format;

    public AvcEncoder()
    {
        codec = MediaCodec.CreateEncoderByType("video/avc");
        format = MediaFormat.CreateVideoFormat("video/avc", 720, 480);
        format.SetInteger(MediaFormat.KeyBitRate, 700000);
        format.SetInteger(MediaFormat.KeyFrameRate, 10);
        format.SetInteger(MediaFormat.KeyColorFormat, (int)Android.Media.MediaCodecCapabilities.Formatyuv420planar);
        format.SetInteger(MediaFormat.KeyIFrameInterval, 5);
        codec.Configure(format, null, null, MediaCodecConfigFlags.Encode);
        codec.Start();

        Java.IO.File f = new Java.IO.File(Android.OS.Environment.ExternalStorageDirectory, "Parkingdom");
        if (!f.Exists())
        {
            f.Mkdirs();
        }
        muxer = new MediaMuxer(f.ToString() + "/test.mp4", MuxerOutputType.Mpeg4);
    }
    public void EncodeFrame(Bitmap image)
    {
        int mWidth = image.Width;
        int mHeight = image.Height;
        int[] mIntArray = new int[mWidth * mHeight];

        // Copy pixel data from the Bitmap into the 'intArray' array
        image.GetPixels(mIntArray, 0, mWidth, 0, 0, mWidth, mHeight);

        byte[] byteArray = new byte[mWidth * mHeight * 3 / 2];

        // Call to encoding function : convert intArray to Yuv Binary data
        EncodeYUV420P(byteArray, mIntArray, mWidth, mHeight);

        using (var stream = new MemoryStream())
        {
            image.Compress(Bitmap.CompressFormat.Png, 100, stream);
            byteArray = stream.ToArray();
        }

        int inputBufferIndex = codec.DequeueInputBuffer(-1);
        if (inputBufferIndex >= 0)
        {
            ByteBuffer buffer = codec.GetInputBuffer(inputBufferIndex);
            buffer.Clear();
            buffer.Put(byteArray);
            codec.QueueInputBuffer(inputBufferIndex, 0, byteArray.Length, 0, 0);
        }
    }
    public void SaveMp4()
    {
        CanEncode = false;
        bool running = true;
        MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
        int track = -1;

        while (running)
        {
            int index = codec.DequeueOutputBuffer(bufferInfo, 10000);
            if (index == (int)MediaCodecInfoState.OutputFormatChanged)
            {
                MediaFormat format = codec.OutputFormat;
                track = muxer.AddTrack(format);
                muxer.Start();
            }
            else if (index == (int)MediaCodecInfoState.TryAgainLater)
            {
                break;
            }
            else if (index >= 0)
            {
                if ((bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
                {
                    bufferInfo.Size = 0;
                }
                if (track != -1)
                {
                    ByteBuffer outBuffer = codec.GetOutputBuffer(index);
                    outBuffer.Position(bufferInfo.Offset);
                    outBuffer.Limit(bufferInfo.Offset + bufferInfo.Size);
                    muxer.WriteSampleData(track, outBuffer, bufferInfo);
                    codec.ReleaseOutputBuffer(index, false);
                }
            }
        }

        codec.Stop();
        codec.Release();
        muxer.Stop();
        muxer.Release();
        CanEncode = true;
    }
    void EncodeYUV420P(byte[] yuv420p, int[] argb, int width, int height)
    {
        int frameSize = width * height;
        int chromasize = frameSize / 4;

        int yIndex = 0;
        int uIndex = frameSize;
        int vIndex = frameSize + chromasize;

        int a, R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++)
        {
            for (int i = 0; i < width; i++)
            {
                a = (int)(argb[index] & 0xff000000) >> 24; // a is not used obviously
                R = (argb[index] & 0xff0000) >> 16;
                G = (argb[index] & 0xff00) >> 8;
                B = (argb[index] & 0xff) >> 0;

                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

                yuv420p[yIndex++] = (byte)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0)
                {
                    yuv420p[uIndex++] = (byte)((U < 0) ? 0 : ((U > 255) ? 255 : U));
                    yuv420p[vIndex++] = (byte)((V < 0) ? 0 : ((V > 255) ? 255 : V));
                }
                index++;
            }
        }
    }
}
Each time a new JPEG is generated, EncodeFrame is called, which is supposed to convert it into YUV420Planar format for the media codec. The codec I'm testing with doesn't support semiplanar.

In case someone comes across this later: I changed EncodeFrame to use an input Surface instead and just used DrawBitmap().
It's slower than the byte copy but is working for my purposes.
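For reference, a rough Java sketch of that Surface-based setup (the Xamarin bindings expose equivalent methods; the bitmap variable and the bitrate/size values here are just placeholders matching the question):

// Configure the encoder for Surface input instead of raw YUV input buffers.
MediaFormat format = MediaFormat.createVideoFormat("video/avc", 720, 480);
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
        MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, 700000);
format.setInteger(MediaFormat.KEY_FRAME_RATE, 10);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);

MediaCodec codec = MediaCodec.createEncoderByType("video/avc");
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
Surface inputSurface = codec.createInputSurface(); // must be called between configure() and start()
codec.start();

// Per frame: draw the bitmap into the encoder's input surface.
Canvas canvas = inputSurface.lockCanvas(null);
canvas.drawBitmap(bitmap, 0, 0, null);
inputSurface.unlockCanvasAndPost(canvas);
// Draining the output buffers into MediaMuxer works the same as before.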

Related

How to convert Bitmap to nv21 byte[] array in android

I have tried this method to convert, but the resulting image is corrupted:

ByteArrayOutputStream outputStream = new ByteArrayOutputStream();
scaled.compress(Bitmap.CompressFormat.JPEG, 100, outputStream);
byte[] bytes = outputStream.toByteArray();

ByteArrayOutputStream out = new ByteArrayOutputStream();
YuvImage image = new YuvImage(bytes, ImageFormat.NV21, inputWidth, inputHeight, null);
int quality = 100;
image.compressToJpeg(new Rect(0, 0, inputWidth, inputHeight), quality, out);
byte[] yuv = out.toByteArray();
I use the following code:
public interface ImageTestUtils {

    static byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
        int[] argb = new int[inputWidth * inputHeight];
        scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

        byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
        encodeYUV420SP(yuv, argb, inputWidth, inputHeight);

        scaled.recycle();
        return yuv;
    }

    static void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
        final int frameSize = width * height;

        int yIndex = 0;
        int uvIndex = frameSize;

        int a, R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++) {
            for (int i = 0; i < width; i++) {
                a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
                R = (argb[index] & 0xff0000) >> 16;
                G = (argb[index] & 0xff00) >> 8;
                B = (argb[index] & 0xff) >> 0;

                // well known RGB to YUV algorithm
                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

                // NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
                // meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
                // pixel AND every other scanline.
                yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0) {
                    yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                    yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
                }
                index++;
            }
        }
    }
}
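To sanity-check the conversion, a rough usage sketch (assuming a Bitmap named scaled): wrap the NV21 buffer in a YuvImage and compress it back to JPEG; if the round-tripped JPEG looks like the original, the byte layout is right.

int inputWidth = scaled.getWidth();
int inputHeight = scaled.getHeight();
byte[] nv21 = ImageTestUtils.getNV21(inputWidth, inputHeight, scaled); // note: this recycles 'scaled'

YuvImage yuvImage = new YuvImage(nv21, ImageFormat.NV21, inputWidth, inputHeight, null);
ByteArrayOutputStream out = new ByteArrayOutputStream();
yuvImage.compressToJpeg(new Rect(0, 0, inputWidth, inputHeight), 100, out);
byte[] roundTrippedJpeg = out.toByteArray();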

Android JavaCV + Camera2

I'm trying to record video from the camera using JavaCV.
// recorder settings:
private int imageWidth = 320;
private int imageHeight = 240;
private int frameRate = 30;

recorder = new FFmpegFrameRecorder(ffmpeg_link, imageWidth, imageHeight, 1);
recorder.setFormat("mp4");
recorder.setFrameRate(frameRate);

// frame settings:
IplImage yuvIplimage = null;
yuvIplimage = IplImage.create(320, 320, IPL_DEPTH_16U, 1); // 32 not supported

// image reader:
private ImageReader mImageReader;
mImageReader = ImageReader.newInstance(320, 320, ImageFormat.YUV_420_888, 10);
mImageReader.setOnImageAvailableListener(
        mOnImageAvailableListener, mBackgroundHandler);

private final ImageReader.OnImageAvailableListener mOnImageAvailableListener
        = new ImageReader.OnImageAvailableListener() {
    @Override
    public void onImageAvailable(ImageReader reader) {
        Image image = reader.acquireNextImage(); // acquireLatestImage(); - also tried
        if (image == null)
            return;

        final ByteBuffer buffer = image.getPlanes()[0].getBuffer();
        byte[] bytes = new byte[buffer.remaining()];
        buffer.get(bytes, 0, bytes.length);

        if (yuvIplimage != null) {
            // OPTION 1
            yuvIplimage.getByteBuffer().put(convertYUV420ToNV21(image));
            // OPTION 2
            //yuvIplimage.getByteBuffer().put(decodeYUV420SP(bytes, 320, 320));
            try {
                if (started) {
                    recorder.record(yuvIplimage);
                }
            } catch (Exception e) {
                e.printStackTrace();
            }
        }
        image.close();
    }
};
Option 1 is to convert the Image to NV21 using this code:
private byte[] convertYUV420ToNV21(Image imgYUV420) {
    byte[] rez;

    ByteBuffer buffer0 = imgYUV420.getPlanes()[0].getBuffer();
    ByteBuffer buffer2 = imgYUV420.getPlanes()[2].getBuffer();
    int buffer0_size = buffer0.remaining();
    int buffer2_size = buffer2.remaining();

    rez = new byte[buffer0_size + buffer2_size];
    buffer0.get(rez, 0, buffer0_size);
    buffer2.get(rez, buffer0_size, buffer2_size);

    return rez;
}
Option 2 is to convert to RGB, if I understand correctly:
public byte[] decodeYUV420SP(byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;
    byte rgb[] = new byte[width * height];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = (byte) (0xff000000 | ((r << 6) & 0xff0000)
                    | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff));
        }
    }
    return rgb;
}
This looks incorrect as well.
Which is the correct way to convert a camera2 Image to an IplImage, and is it possible to do it on the fly?
If the recorder requires NV21, then converting the image to that instead of RGB is likely the fastest option.
But why don't you just use android.media.MediaRecorder? It's much more efficient and can use the hardware encoders.
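For example, a rough sketch of the MediaRecorder route (reusing the names from the question; the camera2 session wiring is only hinted at in the comment):

MediaRecorder mediaRecorder = new MediaRecorder();
mediaRecorder.setVideoSource(MediaRecorder.VideoSource.SURFACE);
mediaRecorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
mediaRecorder.setOutputFile(ffmpeg_link);               // same output path as the question
mediaRecorder.setVideoEncoder(MediaRecorder.VideoEncoder.H264);
mediaRecorder.setVideoSize(imageWidth, imageHeight);
mediaRecorder.setVideoFrameRate(frameRate);
mediaRecorder.prepare();
// Add mediaRecorder.getSurface() as an output target of the camera2 CaptureRequest
// and CameraCaptureSession, then call mediaRecorder.start() / stop().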
But if you need to stick with ffmpeg, your first option is incorrect for many devices. In addition, make sure you remove that earlier buffer.get call - it consumes plane 0, so the rest of the reads from plane 0 won't work right, which may be your current problem: once you have read plane 0, .remaining() will return 0.
The YUV image has 3 planes, and unless you've checked that the underlying format is actually NV21, you shouldn't blindly assume that, or assume that the row stride is equal to the width.
To be safe, you need to look at both row and pixel strides when copying the three planes into the semiplanar byte[].
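For illustration, a minimal sketch of that stride-aware copy (not code from this answer; it assumes the U and V planes share the same row and pixel strides, which is typical for YUV_420_888):

private static byte[] yuv420888ToNv21(Image image) {
    int width = image.getWidth();
    int height = image.getHeight();
    byte[] nv21 = new byte[width * height * 3 / 2];
    Image.Plane[] planes = image.getPlanes();

    // Y plane: copy row by row so any padding at the end of each row is skipped.
    ByteBuffer yBuf = planes[0].getBuffer();
    int yRowStride = planes[0].getRowStride();
    int pos = 0;
    for (int row = 0; row < height; row++) {
        yBuf.position(row * yRowStride);
        yBuf.get(nv21, pos, width);
        pos += width;
    }

    // Chroma planes: honor rowStride and pixelStride, writing V then U (NV21 order).
    ByteBuffer uBuf = planes[1].getBuffer();
    ByteBuffer vBuf = planes[2].getBuffer();
    int uvRowStride = planes[1].getRowStride();
    int uvPixelStride = planes[1].getPixelStride();
    for (int row = 0; row < height / 2; row++) {
        for (int col = 0; col < width / 2; col++) {
            int offset = row * uvRowStride + col * uvPixelStride;
            nv21[pos++] = vBuf.get(offset);
            nv21[pos++] = uBuf.get(offset);
        }
    }
    return nv21;
}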
If you're willing to move your processing over to C++, you could do the conversion like this:
cv::cvtColor((cv::_InputArray)mNV, (cv::_OutputArray)rgba, CV_YUV2RGBA_NV12, 0);
This will store the image as an RGBA mat and you can save it as normal from there.

How to detect average pixel intensity from live camera preview?

I am building a scanner app and trying to determine the "preview quality" from the camera's preview callback. I want to customize the camera's AUTO_FLASH_MODE so that it is turned on if the environment is too dark.
How can I detect whether there is a high average of dark pixels? That would mean the preview is dark and I need to turn on the camera's flashlight.
Either find out how to access pixel values of your image and calculate the average intensity yourself or use any image processing library to do so.
Dark pixels have low values, bright pixels have high values.
You want to calculate the average of all red, green and blue values divided by three times your pixel count.
Define a threshold for when to turn on the flash, but keep in mind that you have to get a new exposure time then.
Prefer flash over exposure time increase as long exposure times yield higher image noise.
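A cheaper alternative (a rough sketch, assuming the preview is in the default NV21 format) is to skip the RGB conversion entirely and average the Y plane of the onPreviewFrame data, since the first width * height bytes are plain luma values:

private static int averageLuma(byte[] nv21, int width, int height) {
    long sum = 0;
    int frameSize = width * height; // the Y (luma) plane comes first in NV21
    for (int i = 0; i < frameSize; i++) {
        sum += nv21[i] & 0xff;      // treat bytes as unsigned 0..255
    }
    return (int) (sum / frameSize); // 0 = black, 255 = white
}

The returned average can then be compared against a threshold to decide when to enable the torch.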
I tried the following bitmap-based approach, but I think it spends unnecessary time processing the bitmap just to get an average screen color:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    Size cameraResolution = resolution;
    PreviewCallback callback = this.callback;
    if (cameraResolution != null && callback != null) {
        int format = camera.getParameters().getPreviewFormat();
        SourceData source = new SourceData(data, cameraResolution.width, cameraResolution.height, format, getCameraRotation());
        callback.onPreview(source);

        final int[] rgb = decodeYUV420SP(data, cameraResolution.width, cameraResolution.height);
        //Bitmap bmp = decodeBitmap(source.getData());
        Bitmap bmp = Bitmap.createBitmap(rgb, cameraResolution.width, cameraResolution.height, Bitmap.Config.ARGB_8888);
        if (bmp != null) {
            //bmp = decodeBitmap(source.getData());
            ByteArrayOutputStream bytes = new ByteArrayOutputStream();
            // bmp.compress(Bitmap.CompressFormat.JPEG, 70, bytes);
            Bitmap resizebitmap = Bitmap.createBitmap(bmp,
                    bmp.getWidth() / 2, bmp.getHeight() / 2, 60, 60);
            int color = getAverageColor(resizebitmap);
            Log.i("Color Int", color + "");
            // int color = resizebitmap.getPixel(resizebitmap.getWidth()/2, resizebitmap.getHeight()/2);
            String strColor = String.format("#%06X", 0xFFFFFF & color);
            //String colorname = sColorNameMap.get(strColor);
            Log.d("strColor", strColor);
            Log.i("strColor", color + "");

            if (!mIsOn) {
                if (color == -16777216 || color < -16777216) { // minimum color code (full dark)
                    mIsOn = true;
                    setTorch(true);
                    Log.d("Yahooooo", "" + color);
                }
            }
            Log.i("Pixel Value",
                    "Top Left pixel: " + Integer.toHexString(color));
        }
    } else {
        Log.d(TAG, "Got preview callback, but no handler or resolution available");
    }
}
private int[] decodeYUV420SP(byte[] yuv420sp, int width, int height) {
    final int frameSize = width * height;
    int rgb[] = new int[width * height];
    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0) y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }
            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);
            if (r < 0) r = 0; else if (r > 262143) r = 262143;
            if (g < 0) g = 0; else if (g > 262143) g = 262143;
            if (b < 0) b = 0; else if (b > 262143) b = 262143;
            rgb[yp] = 0xff000000 | ((r << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((b >> 10) & 0xff);
        }
    }
    return rgb;
}
private int getAverageColor(Bitmap bitmap) {
    int redBucket = 0;
    int greenBucket = 0;
    int blueBucket = 0;
    int pixelCount = 0;

    for (int y = 0; y < bitmap.getHeight(); y++) {
        for (int x = 0; x < bitmap.getWidth(); x++) {
            int c = bitmap.getPixel(x, y);
            pixelCount++;
            redBucket += Color.red(c);
            greenBucket += Color.green(c);
            blueBucket += Color.blue(c);
            // does alpha matter?
        }
    }

    int averageColor = Color.rgb(redBucket / pixelCount,
            greenBucket / pixelCount, blueBucket / pixelCount);
    return averageColor;
}

How to overwrite onPreviewFrame data with static YUV frame?

My application overrides the onPreviewFrame callback to pass the current camera frame to a webrtc native function. This works perfectly, however I want to be able to switch to sending a static frame instead of video, if that option has been selected in my app.
So far I have created a YUV NV21 image, which I am storing in the assets dir. All attempts to pass that frame down to the native function have resulted in purple/green stripes rather than the actual image.
This is what I have so far:

@Override
public void onPreviewFrame(byte[] data, Camera camera) {
    previewBufferLock.lock();

    if (mFrameProvider.isEnabled()) {
        mFrameProvider.overwriteWithFrame(data, expectedFrameSize);
    }

    if (isCaptureRunning) {
        if (data.length == expectedFrameSize) {
            ProvideCameraFrame(data, expectedFrameSize, context);
            cameraUtils.addCallbackBuffer(camera, data);
        }
    }
    previewBufferLock.unlock();
}
@Override
public byte[] overwriteWithPreviewFrame(byte[] data, int expectedFrameSize) {
    if (mFrameData == null) {
        loadPreviewFrame();
    }

    for (int i = 0; i < expectedFrameSize; i++) {
        if (i < mFrameData.length) {
            data[i] = mFrameData[i];
        }
    }
    return data;
}
And
private void loadPreviewFrame() {
    try {
        InputStream open = mContext.getResources().getAssets().open(PREVIEW_FRAME_FILE);
        mFrameData = IOUtils.toByteArray(open);
        open.close();
    } catch (Exception e) {
        Log.e("", "", e);
    }
}
I have tried converting the image to a bitmap too. So the question is: how can I open a YUV frame from assets and convert it into a suitable format to pass to the native methods?
The code above still produces the purple/green striped output.
After a long fight with the Android API I managed to get this working.
There were two issues that caused the green/purple output:
Loss of data: the generated YUV frame was larger than the original preview frame at the same resolution, so the data being passed down to the native code was missing around 30% of its image data.
Wrong resolution: the native code required the resolution of the preview frame, not the camera's.
Below is a working solution for anyone who wishes to add a static frame. The updated code:
@Override
public byte[] getPreviewFrameData(int width, int height) {
    if (mPreviewFrameData == null) {
        loadPreviewFrame(width, height);
    }
    return mPreviewFrameData;
}

private void loadPreviewFrame(int width, int height) {
    try {
        Bitmap previewImage = BitmapFactory.decodeResource(mContext.getResources(), R.drawable.frame);
        Bitmap resizedPreviewImage = Bitmap.createScaledBitmap(previewImage, width, height, false);

        BitmapConverter bitmapConverter = new BitmapConverter();
        mPreviewFrameData = bitmapConverter.convertToNV21(resizedPreviewImage);
    } catch (Exception e) {
        Log.e("DisabledCameraFrameProvider", "Failed to loadPreviewFrame");
    }
}
class BitmapConverter {

    byte[] convertToNV21(Bitmap bitmap) {
        int inputWidth = bitmap.getWidth();
        int inputHeight = bitmap.getHeight();

        int[] argb = new int[inputWidth * inputHeight];
        bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);

        byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
        encodeYUV420SP(yuv, argb, inputWidth, inputHeight);

        bitmap.recycle();
        return yuv;
    }

    void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
        final int frameSize = width * height;

        int yIndex = 0;
        int uvIndex = frameSize;

        int R, G, B, Y, U, V;
        int index = 0;
        for (int j = 0; j < height; j++) {
            for (int i = 0; i < width; i++) {
                R = (argb[index] & 0xff0000) >> 16;
                G = (argb[index] & 0xff00) >> 8;
                B = (argb[index] & 0xff);

                Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
                U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
                V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;

                yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
                if (j % 2 == 0 && index % 2 == 0) {
                    yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
                    yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
                }
                index++;
            }
        }
    }
}
Then, finally, in your callback:
public void onPreviewFrame(byte[] data, Camera camera) {
    byte[] bytes = data;

    if (!mProvider.isVideoEnabled()) {
        Camera.Size previewSize = camera.getParameters().getPreviewSize();
        bytes = mProvider.getPreviewFrameData(previewSize.width, previewSize.height);
    }

    ProvideCameraFrame(bytes, bytes.length, context);
}
The key was to scale the image to the camera preview size and convert the image to YUV colour space.
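As a small extra sanity check (not part of the original answer), the buffer handed to the native layer inside the !isVideoEnabled() branch can be verified against the NV21 size it expects, width * height * 3 / 2 bytes:

int expectedFrameSize = previewSize.width * previewSize.height * 3 / 2; // NV21 frame size
if (bytes.length != expectedFrameSize) {
    Log.w("FrameProvider", "Unexpected frame size: " + bytes.length
            + " bytes, expected " + expectedFrameSize);
}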

Android JavaCV create IplImage from Camera to use with ColorHistogram

I am using JavaCV in Android.
In my code I have created an ImageComparator object (a class from the OpenCV Cookbook examples:
http://code.google.com/p/javacv/source/browse/OpenCV2_Cookbook/src/opencv2_cookbook/chapter04/ImageComparator.scala?repo=examples
http://code.google.com/p/javacv/wiki/OpenCV2_Cookbook_Examples_Chapter_4)
and use that object to compare images. If I use a file from the SD card, the comparator works.
File referenceImageFile = new File(absPath1); // Read an image.
IplImage reference = Util.loadOrExit(referenceImageFile, CV_LOAD_IMAGE_COLOR);
comparator = new ImageComparator(reference);
But when I create the IplImage from the camera preview, it does not work. I get the following exception during the comparison ("score") calculation:
score = referenceComparator.compare(grayImage) / imageSize;
java.lang.RuntimeException: /home/saudet/android/OpenCV-2.4.2/modules/core/src/convert.cpp:1196: error: (-215) i < src.channels() in function void cvSplit(const void*, void*, void*, void*, void*)
For the camera preview I am using the code from FacePreview to create the IplImage, but it creates the image in grayscale:
int f = SUBSAMPLING_FACTOR;

if (grayImage == null || grayImage.width() != width / f
        || grayImage.height() != height / f) {
    grayImage = IplImage.create(width / f, height / f, IPL_DEPTH_8U, 1);
}

int imageWidth = grayImage.width();
int imageHeight = grayImage.height();
int dataStride = f * width;
int imageStride = grayImage.widthStep();
ByteBuffer imageBuffer = grayImage.getByteBuffer();
for (int y = 0; y < imageHeight; y++) {
    int dataLine = y * dataStride;
    int imageLine = y * imageStride;
    for (int x = 0; x < imageWidth; x++) {
        imageBuffer.put(imageLine + x, data[dataLine + f * x]);
    }
}
How to create a Color IplImage from Camera to use with ImageComparator?
The code below seems to be working fine.
public void onPreviewFrame(final byte[] data, final Camera camera) {
    try {
        Camera.Size size = camera.getParameters().getPreviewSize();
        processImage(data, size.width, size.height);
        camera.addCallbackBuffer(data);
    } catch (RuntimeException e) {
        // The camera has probably just been released, ignore.
        Log.d("Exception", " " + e);
    }
}

protected void processImage(byte[] data, int width, int height) {
    score.clear();

    // First, downsample our image
    int f = SUBSAMPLING_FACTOR;

    IplImage _4image = IplImage.create(width, height, IPL_DEPTH_8U, f);
    int[] _temp = new int[width * height];
    if (_4image != null) {
        decodeYUV420SP(_temp, data, width, height);
        _4image.getIntBuffer().put(_temp);
    }

    //bitmap = Bitmap.createBitmap(width, height, Bitmap.Config.ARGB_8888);
    //bitmap.copyPixelsFromBuffer(_4image.getByteBuffer());
    Log.d("CompareAndroid", "processImage");
    int imageSize = _4image.width() * _4image.height();
    Iterator<ImageComparator> iterator = reference_List.iterator();

    // Compute histogram match and normalize by image size.
    // 1 means perfect match.
    while (iterator.hasNext()) {
        score.add(((ImageComparator) iterator.next()).compare(_4image) / imageSize);
    }
    Log.d("CompareImages", "Score Size " + score.size());
    postInvalidate();
}
This code seems to be working fine.
private void decodeYUV420SP(int[] rgb, byte[] yuv420sp, int width, int height) {
    int frameSize = width * height;

    for (int j = 0, yp = 0; j < height; j++) {
        int uvp = frameSize + (j >> 1) * width, u = 0, v = 0;
        for (int i = 0; i < width; i++, yp++) {
            int y = (0xff & ((int) yuv420sp[yp])) - 16;
            if (y < 0)
                y = 0;
            if ((i & 1) == 0) {
                v = (0xff & yuv420sp[uvp++]) - 128;
                u = (0xff & yuv420sp[uvp++]) - 128;
            }

            int y1192 = 1192 * y;
            int r = (y1192 + 1634 * v);
            int g = (y1192 - 833 * v - 400 * u);
            int b = (y1192 + 2066 * u);

            if (r < 0)
                r = 0;
            else if (r > 262143)
                r = 262143;
            if (g < 0)
                g = 0;
            else if (g > 262143)
                g = 262143;
            if (b < 0)
                b = 0;
            else if (b > 262143)
                b = 262143;

            rgb[yp] = 0xff000000 | ((b << 6) & 0xff0000) | ((g >> 2) & 0xff00) | ((r >> 10) & 0xff);
        }
    }
}
I haven't tested it, but something like this should work:
IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 2);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);
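A variant of the same idea (untested sketch, assuming data is the NV21 preview byte[]): use a single-channel source image of height * 3 / 2 rows, which is the layout OpenCV's NV21 conversion expects, and copy the preview bytes straight in:

IplImage yuvimage = IplImage.create(width, height * 3 / 2, IPL_DEPTH_8U, 1);
IplImage rgbimage = IplImage.create(width, height, IPL_DEPTH_8U, 3);
yuvimage.getByteBuffer().put(data, 0, width * height * 3 / 2); // copy the NV21 frame in
cvCvtColor(yuvimage, rgbimage, CV_YUV2BGR_NV21);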
