I'm working on a video conference app using OpenVidu, and we are trying to include a Wikitude AR session in the call.
The problem is that both of them require access to the camera, so I have the following scenario: if I instantiate the local participant video first, I can't start the Wikitude AR session because its video doesn't load. If I instantiate the Wikitude session first, the other participants in the call don't see the device's video.
I was able to create a custom video capturer for OpenVidu that imitates the camera. It requires every frame to be pushed to it manually.
package org.webrtc;
import android.content.Context;
import android.graphics.Bitmap;
import java.util.Timer;
import java.util.TimerTask;
import java.util.concurrent.atomic.AtomicReference;
public class CustomVideoCapturer implements VideoCapturer {
private final static String TAG = "CustomVideoCapturer";
//private final FileVideoCapturer.VideoReader videoReader;
private final Timer timer = new Timer();
private CapturerObserver capturerObserver;
private AtomicReference<Bitmap> image = new AtomicReference<Bitmap>();
private final TimerTask tickTask = new TimerTask() {
@Override
public void run() {
tick();
}
};
public CustomVideoCapturer() {
}
public void tick() {
Bitmap frame = image.get();
if (frame != null && !frame.isRecycled()) {
NV21Buffer nv21Buffer = new NV21Buffer(getNV21(frame),frame.getWidth(),frame.getHeight(), null);
VideoFrame videoFrame = new VideoFrame(nv21Buffer, 0, System.nanoTime());
capturerObserver.onFrameCaptured(videoFrame);
}
}
byte [] getNV21(Bitmap image) {
int [] argb = new int[image.getWidth() * image.getHeight()];
image.getPixels(argb, 0, image.getWidth(), 0, 0, image.getWidth(), image.getHeight());
byte [] yuv = new byte[image.getWidth()*image.getHeight()*3/2];
encodeYUV420SP(yuv, argb, image.getWidth(), image.getHeight());
image.recycle();
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
public void sendFrame(Bitmap bitmap) {
image.set(bitmap);
}
@Override
public void initialize(SurfaceTextureHelper surfaceTextureHelper, Context applicationContext,
CapturerObserver capturerObserver) {
this.capturerObserver = capturerObserver;
}
@Override
public void startCapture(int width, int height, int framerate) {
//timer.schedule(tickTask, 0, 1000 / framerate);
threadCV().start();
}
Thread threadCV() {
return new Thread() {
@Override
public void run() {
while (true) {
if (image.get() != null) {
tick();
}
try {
Thread.sleep(10);
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
};
}
@Override
public void stopCapture() throws InterruptedException {
timer.cancel();
}
@Override
public void changeCaptureFormat(int width, int height, int framerate) {
// Empty on purpose
}
@Override
public void dispose() {
//videoReader.close();
}
@Override
public boolean isScreencast() {
return false;
}
private interface VideoReader {
VideoFrame getNextFrame();
void close();
}
}
On the local participant I then use this function to send the frame:
public void sendFrame(Bitmap frame) {
customVideoCapturer.sendFrame(frame);
}
But I wasn't able to take the frames from the Wikitude camera. Is there a way to access the frames and resend them?
As of the Native API SDK, version 9.10.0, according to an answer from Wikitude support
(https://support.wikitude.com/support/discussions/topics/5000096719?page=1), a custom plugin has to be created to access the camera frames:
https://www.wikitude.com/external/doc/documentation/latest/androidnative/pluginsapi.html#plugins-api
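I don't have a verified end-to-end sample, but conceptually the plugin hands each camera frame back to Java, where it can be pushed into the WebRTC pipeline the same way the capturer above does it. A minimal sketch, assuming the native plugin delivers NV21 bytes through a JNI callback (the bridge class and its method are hypothetical names, not part of the Wikitude or OpenVidu APIs):
import org.webrtc.CapturerObserver;
import org.webrtc.NV21Buffer;
import org.webrtc.VideoFrame;

public class WikitudeFrameBridge {

    private final CapturerObserver observer; // e.g. videoSource.getCapturerObserver()

    public WikitudeFrameBridge(CapturerObserver observer) {
        this.observer = observer;
    }

    // Called from native code once per camera frame the Wikitude plugin receives.
    public void onCameraFrame(byte[] nv21, int width, int height) {
        NV21Buffer buffer = new NV21Buffer(nv21, width, height, null);
        VideoFrame frame = new VideoFrame(buffer, 0 /* rotation */, System.nanoTime());
        observer.onFrameCaptured(frame);
        frame.release(); // the observer retains what it needs
    }
}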
Related
I'm attempting to use MediaCodec and MediaMuxer to change a series of JPEGs into an MP4.
No matter what I do, I always get a green, static-looking screen as output in the MP4.
Code follows:
public class AvcEncoder
{
public bool CanEncode = true;
MediaCodec codec;
MediaMuxer muxer;
MediaFormat format;
public AvcEncoder()
{
codec = MediaCodec.CreateEncoderByType("video/avc");
format = MediaFormat.CreateVideoFormat("video/avc", 720, 480);
format.SetInteger(MediaFormat.KeyBitRate, 700000);
format.SetInteger(MediaFormat.KeyFrameRate, 10);
format.SetInteger(MediaFormat.KeyColorFormat, (int)Android.Media.MediaCodecCapabilities.Formatyuv420planar);
format.SetInteger(MediaFormat.KeyIFrameInterval, 5);
codec.Configure(format, null, null, MediaCodecConfigFlags.Encode);
codec.Start();
Java.IO.File f = new Java.IO.File(Android.OS.Environment.ExternalStorageDirectory, "Parkingdom");
if (!f.Exists())
{
f.Mkdirs();
}
muxer = new MediaMuxer(f.ToString() + "/test.mp4", MuxerOutputType.Mpeg4);
}
public void EncodeFrame(Bitmap image)
{
int mWidth = image.Width;
int mHeight = image.Height;
int[] mIntArray = new int[mWidth * mHeight];
// Copy pixel data from the Bitmap into the 'intArray' array
image.GetPixels(mIntArray, 0, mWidth, 0, 0, mWidth, mHeight);
byte[] byteArray = new byte[mWidth * mHeight * 3 / 2];
// Call to encoding function : convert intArray to Yuv Binary data
EncodeYUV420P(byteArray, mIntArray, mWidth, mHeight);
using (var stream = new MemoryStream())
{
image.Compress(Bitmap.CompressFormat.Png, 100, stream);
byteArray = stream.ToArray();
}
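// Note: this block replaces the YUV420 data produced by EncodeYUV420P above with
// PNG-compressed bytes, so the encoder never receives valid raw YUV input.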
int inputBufferIndex = codec.DequeueInputBuffer(-1);
if (inputBufferIndex >= 0)
{
ByteBuffer buffer = codec.GetInputBuffer(inputBufferIndex);
buffer.Clear();
buffer.Put(byteArray);
codec.QueueInputBuffer(inputBufferIndex, 0, byteArray.Length, 0, 0);
}
}
public void SaveMp4()
{
CanEncode = false;
bool running = true;
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
int track = -1;
while (running)
{
int index = codec.DequeueOutputBuffer(bufferInfo, 10000);
if (index == (int)MediaCodecInfoState.OutputFormatChanged)
{
MediaFormat format = codec.OutputFormat;
track = muxer.AddTrack(format);
muxer.Start();
}
else if (index == (int)MediaCodecInfoState.TryAgainLater)
{
break;
}
else if (index >= 0)
{
if ((bufferInfo.Flags & MediaCodecBufferFlags.CodecConfig) != 0)
{
bufferInfo.Size = 0;
}
if (track != -1)
{
ByteBuffer outBuffer = codec.GetOutputBuffer(index);
outBuffer.Position(bufferInfo.Offset);
outBuffer.Limit(bufferInfo.Offset + bufferInfo.Size);
muxer.WriteSampleData(track, outBuffer, bufferInfo);
codec.ReleaseOutputBuffer(index, false);
}
}
}
codec.Stop();
codec.Release();
muxer.Stop();
muxer.Release();
CanEncode = true;
}
void EncodeYUV420P(byte[] yuv420p, int[] argb, int width, int height)
{
int frameSize = width * height;
int chromasize = frameSize / 4;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize + chromasize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++)
{
for (int i = 0; i < width; i++)
{
a = (int)(argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420p[yIndex++] = (byte)((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0)
{
yuv420p[uIndex++] = (byte)((U < 0) ? 0 : ((U > 255) ? 255 : U));
yuv420p[vIndex++] = (byte)((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
index++;
}
}
}
}
Each time a new JPEG is generated, "EncodeFrame" is called, which is supposed to convert it into the YUV420Planar format for the media codec. The codec I'm testing with doesn't support semi-planar.
In case someone comes across this later: I changed EncodeFrame to use a Surface instead and just used DrawBitmap().
It's slower than the byte copy, but it works for my purposes.
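For reference, a minimal sketch of that Surface-based path, written in Java (the Xamarin bindings mirror these calls); it assumes API 18+, and note that drawing with a software Canvas onto an encoder input surface works on many devices but is not guaranteed everywhere, so rendering through OpenGL ES is the more portable route:
import android.graphics.Bitmap;
import android.graphics.Canvas;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.view.Surface;
import java.io.IOException;

public final class SurfaceEncoderSketch {

    // Same parameters as the question above, but with Surface input instead of YUV buffers.
    public static MediaCodec createSurfaceEncoder(int width, int height) throws IOException {
        MediaFormat format = MediaFormat.createVideoFormat("video/avc", width, height);
        format.setInteger(MediaFormat.KEY_BIT_RATE, 700000);
        format.setInteger(MediaFormat.KEY_FRAME_RATE, 10);
        // COLOR_FormatSurface tells the codec its input comes from a Surface, not byte buffers.
        format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
                MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
        format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 5);

        MediaCodec codec = MediaCodec.createEncoderByType("video/avc");
        codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        // Call codec.createInputSurface() here, after configure() and before start().
        return codec;
    }

    // Draw one Bitmap onto the encoder's input surface instead of queueing YUV bytes.
    public static void drawFrame(Surface inputSurface, Bitmap bitmap) {
        Canvas canvas = inputSurface.lockCanvas(null);
        try {
            canvas.drawBitmap(bitmap, 0, 0, null);
        } finally {
            inputSurface.unlockCanvasAndPost(canvas);
        }
    }
}
The output side is drained into the MediaMuxer exactly as in SaveMp4() above; signalEndOfInputStream() replaces the end-of-stream input-buffer flag once you are done drawing frames.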
I am trying to convert a bitmap to YUV and record this YUV with the FFmpeg frame recorder.
I am getting video output with only green pixels, though when I check the properties of this video it shows the frame rate and resolution I set.
The YUV encoding part is correct, but I feel I am making a mistake somewhere else, mostly in returning the YUV bytes to the recording part (getByte(byte[] yuv)), because only there is the yuv.length displayed in the console 0; all the other methods print a large value.
Kindly help.
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
directory.mkdirs();
addListenerOnButton();
play=(Button)findViewById(R.id.buttonplay);
stop=(Button)findViewById(R.id.buttonstop);
record=(Button)findViewById(R.id.buttonstart);
stop.setEnabled(false);
play.setEnabled(false);
record.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
startRecording();
getByte(new byte[]{});
}
});
stop.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
stopRecording();
}
});
play.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) throws IllegalArgumentException, SecurityException, IllegalStateException {
Intent intent = new Intent(Intent.ACTION_VIEW, Uri.parse(String.valueOf(asmileys)));
intent.setDataAndType(Uri.parse(String.valueOf(asmileys)), "video/mp4");
startActivity(intent);
Toast.makeText(getApplicationContext(), "Playing Video", Toast.LENGTH_LONG).show();
}
});
}
......//......
public void getByte(byte[] yuv) {
getNV21(640, 480, bitmap);
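// Note: the return value of getNV21() is discarded here, and 'yuv' is still the empty
// array passed in from onClick (getByte(new byte[]{})), which is why the next line prints 0.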
System.out.println(yuv.length + " ");
if (audioRecord == null || audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
startTime = System.currentTimeMillis();
return;
}
if (RECORD_LENGTH > 0) {
int i = imagesIndex++ % images.length;
yuvimage = images[i];
timestamps[i] = 1000 * (System.currentTimeMillis() - startTime);
}
/* get video data */
if (yuvimage != null && recording) {
((ByteBuffer) yuvimage.image[0].position(0)).put(yuv);
if (RECORD_LENGTH <= 0) {
try {
long t = 1000 * (System.currentTimeMillis() - startTime);
if (t > recorder.getTimestamp()) {
recorder.setTimestamp(t);
}
recorder.record(yuvimage);
} catch (FFmpegFrameRecorder.Exception e) {
e.printStackTrace();
}
}
}
}
public byte [] getNV21(int inputWidth, int inputHeight, Bitmap bitmap) {
int[] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
bitmap.recycle();
System.out.println(yuv.length + " ");
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uIndex = frameSize;
int vIndex = frameSize;
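// Note: uIndex and vIndex both start at frameSize, so every V write below overwrites the
// U that was just written and the last quarter of the buffer is never filled; for NV21 the
// chroma plane should be written interleaved (V, then U) from a single index.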
System.out.println(yuv420sp.length + " " + frameSize);
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[vIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
index++;
}
}
}
.....//.....
public void addListenerOnButton() {
image = (ImageView) findViewById(R.id.imageView);
image.setDrawingCacheEnabled(true);
image.buildDrawingCache();
bitmap = image.getDrawingCache();
System.out.println(bitmap.getByteCount() + " " );
button = (Button) findViewById(R.id.btn1);
button.setOnClickListener(new OnClickListener() {
@Override
public void onClick(View view){
image.setImageResource(R.drawable.image1);
}
});
......//......
EDIT 1:
I made a few changes to the above code:
record.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
startRecording();
getByte();
}
});
.....//....
public void getbyte() {
byte[] yuv = getNV21(640, 480, bitmap);
So now, in the console, I get the same yuv length in this method as the yuv length from the getNV21 method.
But now I am getting a half black and half green screen (black above, green below) in the recorded video.
If I add these lines to the onCreate method:
image = (ImageView) findViewById(R.id.imageView);
image.setDrawingCacheEnabled(true);
image.buildDrawingCache();
bitmap = image.getDrawingCache();
I get distorted frames (frames are 1/4th of the displayed image, with colors mixed up here and there) in the video.
All I am trying to learn is the image processing and the flow of byte[] from one method to another, but I am still a noob.
Kindly help!
My application overrides the onPreviewFrame callback to pass the current camera frame to a WebRTC native function. This works perfectly; however, I want to be able to switch to sending a static frame instead of video if that option has been selected in my app.
So far I have created a YUV NV21 image, which I am storing in the assets directory. All attempts to pass that frame down to the native function have resulted in purple/green stripes rather than the actual image.
This is what I have so far:
@Override
public void onPreviewFrame(byte[] data, Camera camera) {
previewBufferLock.lock();
if (mFrameProvider.isEnabled()) {
mFrameProvider.overwriteWithFrame(data, expectedFrameSize);
}
if (isCaptureRunning) {
if (data.length == expectedFrameSize) {
ProvideCameraFrame(data, expectedFrameSize, context);
cameraUtils.addCallbackBuffer(camera, data);
}
}
previewBufferLock.unlock();
}
@Override
public byte[] overwriteWithPreviewFrame(byte[] data, int expectedFrameSize) {
if (mFrameData == null) {
loadPreviewFrame();
}
for (int i=0; i < expectedFrameSize; i++) {
if (i < mFrameData.length) {
data[i] = mFrameData[i];
}
}
return data;
}
And
private void loadPreviewFrame() {
try {
InputStream open = mContext.getResources().getAssets().open(PREVIEW_FRAME_FILE);
mFrameData = IOUtils.toByteArray(open);
open.close();
} catch (Exception e) {
Log.e("", "", e);
}
}
I have tried converting the image to a bitmap too. So the question is: how can I open a YUV frame from assets and convert it into a suitable format to pass to the native methods?
After a long fight with the Android API, I managed to get this working.
There were two issues that caused the green/purple output:
Loss of data: the generated YUV frame was larger than the original preview frame at the same resolution, so the data being passed down to the native code was missing around 30% of its image data.
Wrong resolution: the native code required the resolution of the preview frame, not that of the camera.
Below is a working solution for anyone who wishes to add a static frame.
The updated code:
@Override
public byte[] getPreviewFrameData(int width, int height) {
if (mPreviewFrameData == null) {
loadPreviewFrame(width, height);
}
return mPreviewFrameData;
}
private void loadPreviewFrame(int width, int height) {
try {
Bitmap previewImage = BitmapFactory.decodeResource(mContext.getResources(), R.drawable.frame);
Bitmap resizedPreviewImage = Bitmap.createScaledBitmap(previewImage, width, height, false);
BitmapConverter bitmapConverter = new BitmapConverter();
mPreviewFrameData = bitmapConverter.convertToNV21(resizedPreviewImage);
} catch (Exception e) {
Log.e("DisabledCameraFrameProvider", "Failed to loadPreviewFrame");
}
}
class BitmapConverter {
byte [] convertToNV21(Bitmap bitmap) {
int inputWidth = bitmap.getWidth();
int inputHeight = bitmap.getHeight();
int [] argb = new int[inputWidth * inputHeight];
bitmap.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte [] yuv = new byte[inputWidth*inputHeight*3/2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
bitmap.recycle();
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff);
Y = ( ( 66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ( ( -38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ( ( 112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte)((V<0) ? 0 : ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte)((U<0) ? 0 : ((U > 255) ? 255 : U));
}
index ++;
}
}
}
}
Then, finally, in your callback:
public void onPreviewFrame(byte[] data, Camera camera) {
byte[] bytes = data;
if (!mProvider.isVideoEnabled()) {
Camera.Size previewSize = camera.getParameters().getPreviewSize();
bytes = mProvider.getPreviewFrameData(previewSize.width, previewSize.height);
}
ProvideCameraFrame(bytes, bytes.length, context);
}
The key was to scale the image to the camera preview size and convert the image to YUV colour space.
How can I convert images to video without using FFmpeg or JCodec, only with the Android MediaCodec? The images for the video are bitmap files that can be ARGB888 or YUV420 (my choice). The most important thing is that the video has to be playable on Android devices, and the maximum API level is 16. I know all about the API 18 MediaMuxer and I cannot use it.
Please help me, I have been stuck on this for many days.
(JCodec is too slow, and FFmpeg is very complicated to use.)
There is no simple way to do this in API 16 that works across all devices.
You will encounter problems with buffer alignment, color spaces, and the need to use different YUV layouts on different devices.
Consider the buffer alignment issue. On pre-API 18 devices with Qualcomm SOCs, you had to align the CbCr planes at a 2K offset from the start of the buffer. On API 18, all devices use the same layout; this is enforced by CTS tests that were added in Android 4.3.
Even with API 18 you still have to runtime detect whether the encoder wants planar or semi-planar values. (It's probably not relevant for your situation, but none of the YUV formats output by the camera are accepted by MediaCodec.) Note there is no RGB input to MediaCodec.
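A minimal sketch of that runtime check, using only the public MediaCodecInfo API (the class and method names are illustrative):
import android.media.MediaCodecInfo;

final class ColorFormatCheck {
    // Returns true if the encoder advertises the planar YUV420 input format; fall back to
    // the semi-planar layout otherwise. A robust version would also handle vendor-specific
    // constants such as COLOR_TI_FormatYUV420PackedSemiPlanar.
    static boolean encoderWantsPlanar(MediaCodecInfo codecInfo, String mimeType) {
        MediaCodecInfo.CodecCapabilities caps = codecInfo.getCapabilitiesForType(mimeType);
        for (int colorFormat : caps.colorFormats) {
            if (colorFormat == MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar) {
                return true;
            }
        }
        return false;
    }
}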
If portability is not a concern, i.e. you're targeting a specific device, your code will be much simpler.
There are code snippets in the SO pages linked above. The closest "official" example is the buffer-to-buffer / buffer-to-surface tests in EncodeDecodeTest. These are API 18 tests, which means they don't do the "if QC and API16 then change buffer alignment" dance, but they do show how to do the planar vs. semi-planar layout detection. It doesn't include an RGB-to-YUV color converter, but there are examples of such around the web.
On the bright side, the encoder output seems to be just fine on any device and API version.
Converting the raw H.264 stream to a .mp4 file requires a 3rd-party library, since as you noted MediaMuxer is not available. I believe some people have installed ffmpeg as a command-line utility and executed it that way (maybe like this?).
import android.app.Activity;
import android.app.ProgressDialog;
import android.graphics.Bitmap;
import android.graphics.BitmapFactory;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaCodecList;
import android.media.MediaFormat;
import android.media.MediaMuxer;
import android.os.Environment;
import android.os.Handler;
import android.util.Log;
import java.io.File;
import java.io.IOException;
import java.nio.ByteBuffer;
import java.text.DateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;
public class MMediaMuxer {
private static final String MIME_TYPE = "video/avc"; // H.264 Advanced Video Coding
private static int _width = 512;
private static int _height = 512;
private static final int BIT_RATE = 800000;
private static final int INFLAME_INTERVAL = 1;
private static final int FRAME_RATE = 10;
private static boolean DEBUG = false;
private MediaCodec mediaCodec;
private MediaMuxer mediaMuxer;
private boolean mRunning;
private int generateIndex = 0;
private int mTrackIndex;
private int MAX_FRAME_VIDEO = 0;
private List<byte[]> bitList;
private List<byte[]> bitFirst;
private List<byte[]> bitLast;
private int current_index_frame = 0;
private static final String TAG = "CODEC";
private String outputPath;
private Activity _activity;
private ProgressDialog pd;
private String _title;
private String _mess;
public void Init(Activity activity, int width, int height, String title, String mess) {
_title = title;
_mess = mess;
_activity = activity;
_width = width;
_height = height;
Logd("MMediaMuxer Init");
ShowProgressBar();
}
private Handler aHandler = new Handler();
public void AddFrame(final byte[] byteFrame) {
CheckDataListState();
new Thread(new Runnable() {
@Override
public void run() {
Logd("Android get Frame");
Bitmap bit = BitmapFactory.decodeByteArray(byteFrame, 0, byteFrame.length);
Logd("Android convert Bitmap");
byte[] byteConvertFrame = getNV21(bit.getWidth(), bit.getHeight(), bit);
Logd("Android convert getNV21");
bitList.add(byteConvertFrame);
}
}).start();
}
public void AddFrame(byte[] byteFrame, int count, boolean isLast) {
CheckDataListState();
Logd("Android get Frames = " + count);
Bitmap bit = BitmapFactory.decodeByteArray(byteFrame, 0, byteFrame.length);
Logd("Android convert Bitmap");
byteFrame = getNV21(bit.getWidth(), bit.getHeight(), bit);
Logd("Android convert getNV21");
for (int i = 0; i < count; i++) {
if (isLast) {
bitLast.add(byteFrame);
} else {
bitFirst.add(byteFrame);
}
}
}
public void CreateVideo() {
current_index_frame = 0;
Logd("Prepare Frames Data");
bitFirst.addAll(bitList);
bitFirst.addAll(bitLast);
MAX_FRAME_VIDEO = bitFirst.size();
Logd("CreateVideo");
mRunning = true;
bufferEncoder();
}
public boolean GetStateEncoder() {
return mRunning;
}
public String GetPath() {
return outputPath;
}
public void onBackPressed() {
mRunning = false;
}
public void ShowProgressBar() {
_activity.runOnUiThread(new Runnable() {
public void run() {
pd = new ProgressDialog(_activity);
pd.setTitle(_title);
pd.setCancelable(false);
pd.setMessage(_mess);
pd.setCanceledOnTouchOutside(false);
pd.show();
}
});
}
public void HideProgressBar() {
new Thread(new Runnable() {
@Override
public void run() {
_activity.runOnUiThread(new Runnable() {
@Override
public void run() {
pd.dismiss();
}
});
}
}).start();
}
private void bufferEncoder() {
Runnable runnable = new Runnable() {
@Override
public void run() {
try {
Logd("PrepareEncoder start");
PrepareEncoder();
Logd("PrepareEncoder end");
} catch (IOException e) {
Loge(e.getMessage());
}
try {
while (mRunning) {
Encode();
}
} finally {
Logd("release");
Release();
HideProgressBar();
bitFirst = null;
bitLast = null;
}
}
};
Thread thread = new Thread(runnable);
thread.start();
}
public void ClearTask() {
bitList = null;
bitFirst = null;
bitLast = null;
}
private void PrepareEncoder() throws IOException {
MediaCodecInfo codecInfo = selectCodec(MIME_TYPE);
if (codecInfo == null) {
Loge("Unable to find an appropriate codec for " + MIME_TYPE);
}
Logd("found codec: " + codecInfo.getName());
int colorFormat;
try {
colorFormat = selectColorFormat(codecInfo, MIME_TYPE);
} catch (Exception e) {
colorFormat = MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar;
}
mediaCodec = MediaCodec.createByCodecName(codecInfo.getName());
MediaFormat mediaFormat = MediaFormat.createVideoFormat(MIME_TYPE, _width, _height);
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
mediaFormat.setInteger(MediaFormat.KEY_FRAME_RATE, FRAME_RATE);
mediaFormat.setInteger(MediaFormat.KEY_COLOR_FORMAT, colorFormat);
mediaFormat.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, INFLAME_INTERVAL);
mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
mediaCodec.start();
try {
String currentDateTimeString = DateFormat.getDateTimeInstance().format(new Date());
outputPath = new File(Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MOVIES),
"pixel"+currentDateTimeString+".mp4").toString();
mediaMuxer = new MediaMuxer(outputPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
} catch (IOException ioe) {
Loge("MediaMuxer creation failed");
}
}
private void Encode() {
while (true) {
if (!mRunning) {
break;
}
Logd("Encode start");
long TIMEOUT_USEC = 5000;
int inputBufIndex = mediaCodec.dequeueInputBuffer(TIMEOUT_USEC);
long ptsUsec = computePresentationTime(generateIndex, FRAME_RATE);
if (inputBufIndex >= 0) {
byte[] input = bitFirst.get(current_index_frame);
final ByteBuffer inputBuffer = mediaCodec.getInputBuffer(inputBufIndex);
inputBuffer.clear();
inputBuffer.put(input);
mediaCodec.queueInputBuffer(inputBufIndex, 0, input.length, ptsUsec, 0);
generateIndex++;
}
MediaCodec.BufferInfo mBufferInfo = new MediaCodec.BufferInfo();
int encoderStatus = mediaCodec.dequeueOutputBuffer(mBufferInfo, TIMEOUT_USEC);
if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet
Loge("No output from encoder available");
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// not expected for an encoder
MediaFormat newFormat = mediaCodec.getOutputFormat();
mTrackIndex = mediaMuxer.addTrack(newFormat);
mediaMuxer.start();
} else if (encoderStatus < 0) {
Loge("unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
} else if (mBufferInfo.size != 0) {
ByteBuffer encodedData = mediaCodec.getOutputBuffer(encoderStatus);
if (encodedData == null) {
Loge("encoderOutputBuffer " + encoderStatus + " was null");
} else {
encodedData.position(mBufferInfo.offset);
encodedData.limit(mBufferInfo.offset + mBufferInfo.size);
mediaMuxer.writeSampleData(mTrackIndex, encodedData, mBufferInfo);
mediaCodec.releaseOutputBuffer(encoderStatus, false);
}
}
current_index_frame++;
if (current_index_frame > MAX_FRAME_VIDEO - 1) {
Log.d(TAG, "mRunning = false;");
mRunning = false;
}
Logd("Encode end");
}
}
private void Release() {
if (mediaCodec != null) {
mediaCodec.stop();
mediaCodec.release();
mediaCodec = null;
Logd("RELEASE CODEC");
}
if (mediaMuxer != null) {
mediaMuxer.stop();
mediaMuxer.release();
mediaMuxer = null;
Logd("RELEASE MUXER");
}
}
/**
* Returns the first codec capable of encoding the specified MIME type, or
* null if no match was found.
*/
private static MediaCodecInfo selectCodec(String mimeType) {
int numCodecs = MediaCodecList.getCodecCount();
for (int i = 0; i < numCodecs; i++) {
MediaCodecInfo codecInfo = MediaCodecList.getCodecInfoAt(i);
if (!codecInfo.isEncoder()) {
continue;
}
String[] types = codecInfo.getSupportedTypes();
for (int j = 0; j < types.length; j++) {
if (types[j].equalsIgnoreCase(mimeType)) {
return codecInfo;
}
}
}
return null;
}
/**
* Returns a color format that is supported by the codec and by this test
* code. If no match is found, this throws a test failure -- the set of
* formats known to the test should be expanded for new platforms.
*/
private static int selectColorFormat(MediaCodecInfo codecInfo,
String mimeType) {
MediaCodecInfo.CodecCapabilities capabilities = codecInfo
.getCapabilitiesForType(mimeType);
for (int i = 0; i < capabilities.colorFormats.length; i++) {
int colorFormat = capabilities.colorFormats[i];
if (isRecognizedFormat(colorFormat)) {
return colorFormat;
}
}
return 0; // not reached
}
/**
* Returns true if this is a color format that this test code understands
* (i.e. we know how to read and generate frames in this format).
*/
private static boolean isRecognizedFormat(int colorFormat) {
switch (colorFormat) {
// these are the formats we know how to handle for
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedSemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_TI_FormatYUV420PackedSemiPlanar:
return true;
default:
return false;
}
}
private byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
int[] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
scaled.recycle();
return yuv;
}
private void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0 : ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0 : ((U > 255) ? 255 : U));
yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0 : ((V > 255) ? 255 : V));
}
index++;
}
}
}
private void CheckDataListState() {
if (bitList == null) {
bitList = new ArrayList<>();
}
if (bitFirst == null) {
bitFirst = new ArrayList<>();
}
if (bitLast == null) {
bitLast = new ArrayList<>();
}
}
private long computePresentationTime(long frameIndex, int framerate) {
return 132 + frameIndex * 1000000 / framerate;
}
private static void Logd(String Mess) {
if (DEBUG) {
Log.d(TAG, Mess);
}
}
private static void Loge(String Mess) {
Log.e(TAG, Mess);
}
}
This works with SDK 21. Don't pay attention to the several lists; it was done this way because the data is converted piece by piece at runtime.
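For clarity, this is roughly how the class above is meant to be driven (a sketch; jpegFrames is assumed to hold one JPEG-compressed image per entry):
// Hypothetical caller, e.g. from an Activity.
void createVideoFromJpegs(android.app.Activity activity, java.util.List<byte[]> jpegFrames) {
    MMediaMuxer mMediaMuxer = new MMediaMuxer();
    mMediaMuxer.Init(activity, 512, 512, "Encoding", "Creating video...");
    for (byte[] jpeg : jpegFrames) {
        mMediaMuxer.AddFrame(jpeg); // decoded to a Bitmap and converted to NV21 on a worker thread
    }
    mMediaMuxer.CreateVideo();      // encodes, muxes to .mp4 and dismisses the progress dialog
    // mMediaMuxer.GetPath() returns the output file once GetStateEncoder() reports false.
}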
Can anyone please help me get the camera preview frame data without pressing the camera button? I want to get the current camera data without clicking the capture button.
I guess you are searching for this function in the Camera class,
public final void setPreviewCallback (Camera.PreviewCallback cb)
Define the callback
private PreviewCallback mPreviewCallback = new PreviewCallback() {
public void onPreviewFrame(byte[] data, Camera camera) {
}
};
Once the preview is started, this callback is triggered for each frame. The data (byte[]) is in the preview format, which you can query and set through the Camera parameters.
First, get the list of supported preview formats:
List<Integer> Camera.Parameters.getSupportedPreviewFormats()
The default format is ImageFormat.NV21.
If you want to change the preview format, use this function, choosing one of the available formats:
Camera.Parameters.setPreviewFormat(int pixel_format)
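Putting those pieces together, a minimal setup could look like this (it assumes the Camera is already opened and attached to a preview surface; android.hardware.Camera is the API this thread is about, even though it is deprecated in newer SDKs):
import android.graphics.ImageFormat;
import android.hardware.Camera;
import java.util.List;

public final class PreviewFrameSetup {
    public static void startFrameCapture(Camera camera) {
        Camera.Parameters params = camera.getParameters();
        List<Integer> supported = params.getSupportedPreviewFormats();
        if (supported.contains(ImageFormat.NV21)) {
            params.setPreviewFormat(ImageFormat.NV21); // NV21 is the documented default
        }
        camera.setParameters(params);

        camera.setPreviewCallback(new Camera.PreviewCallback() {
            @Override
            public void onPreviewFrame(byte[] data, Camera camera) {
                // One frame per call, in the preview format chosen above.
                // Process or copy the data here and return quickly.
            }
        });
        camera.startPreview();
    }
}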
Seeing as you're using MonoDevelop and not writing in Java, the procedure will be a little different.
You can create a camera preview handler class like so:
public class CameraListener : Java.Lang.Object, Camera.IPreviewCallback
{
public event PreviewFrameHandler PreviewFrame;
public void OnPreviewFrame(byte[] data, Camera camera)
{
if (PreviewFrame != null)
{
PreviewFrame(this, new PreviewFrameEventArgs(data, camera));
}
}
}
public delegate void PreviewFrameHandler(object sender, PreviewFrameEventArgs e);
public class PreviewFrameEventArgs : EventArgs
{
readonly byte[] _data;
readonly Camera _camera;
public byte[] Data { get { return _data; } }
public Camera Camera { get { return _camera; } }
public PreviewFrameEventArgs(byte[] data, Camera camera)
{
_data = data;
_camera = camera;
}
}
The class provides an event that is fired for each frame received.
In my own code I use the YUV420_NV21 format.
I decode the data using the following method:
unsafe public static void convertYUV420_NV21toRGB565(byte* yuvIn, Int16* rgbOut, int width, int height, bool monochrome)
{
int size = width * height;
int offset = size;
int u, v, y1, y2, y3, y4;
for (int i = 0, k = 0; i < size; i += 2, k += 2)
{
y1 = yuvIn[i];
y2 = yuvIn[i + 1];
y3 = yuvIn[width + i];
y4 = yuvIn[width + i + 1];
u = yuvIn[offset + k];
v = yuvIn[offset + k + 1];
u = u - 128;
v = v - 128;
if (monochrome)
{
convertYUVtoRGB565Monochrome(y1, u, v, rgbOut, i);
convertYUVtoRGB565Monochrome(y2, u, v, rgbOut, (i + 1));
convertYUVtoRGB565Monochrome(y3, u, v, rgbOut, (width + i));
convertYUVtoRGB565Monochrome(y4, u, v, rgbOut, (width + i + 1));
}
else
{
convertYUVtoRGB565(y1, u, v, rgbOut, i);
convertYUVtoRGB565(y2, u, v, rgbOut, (i + 1));
convertYUVtoRGB565(y3, u, v, rgbOut, (width + i));
convertYUVtoRGB565(y4, u, v, rgbOut, (width + i + 1));
}
if (i != 0 && (i + 2) % width == 0)
i += width;
}
}
unsafe private static void convertYUVtoRGB565Monochrome(int y, int u, int v, Int16* rgbOut, int index)
{
rgbOut[index] = (short)(((y & 0xf8) << 8) |
((y & 0xfc) << 3) |
((y >> 3) & 0x1f));
}
unsafe private static void convertYUVtoRGB565(int y, int u, int v, Int16* rgbOut, int index)
{
int r = y + (int)1.402f * v;
int g = y - (int)(0.344f * u + 0.714f * v);
int b = y + (int)1.772f * u;
r = r > 255 ? 255 : r < 0 ? 0 : r;
g = g > 255 ? 255 : g < 0 ? 0 : g;
b = b > 255 ? 255 : b < 0 ? 0 : b;
rgbOut[index] = (short)(((b & 0xf8) << 8) |
((g & 0xfc) << 3) |
((r >> 3) & 0x1f));
}
I've included both monochrome and colour decoders.
The resulting data from this code is in RGB565 format and can be used to initialise OpenGL textures, or you can just mess with the pixels for image analysis, etc.
Bob Powell.
This recipe from Xamarin explains how to use the Camera class to get a preview and display it to the user.