I'm trying to write an app with the NDK camera API, but I'm facing issues with encoding. This is my code (sorry for the code indentation):
static void image_reader_on_image_available(void *context,
                                            AImageReader *reader) {
    media_status_t media_status;
    AImage *image = NULL;
    media_status = AImageReader_acquireLatestImage(reader, &image);
    if (media_status != AMEDIA_OK) {
        LOGI("No image available.");
        return;
    }
    size_t bufferSize = 0;
    int inputBufferIdx = AMediaCodec_dequeueInputBuffer(mediaCodec, -1);
    uint8_t *inputBuffer = AMediaCodec_getInputBuffer(mediaCodec, inputBufferIdx, &bufferSize);
    if (inputBufferIdx >= 0) {
        LOGI("dequeue input buffer (size: %u)", bufferSize);
        int dataSize = 0;
        AImage_getPlaneData(image, 0, &inputBuffer, &dataSize);
        LOGI("copying image buffer (size: %d)", dataSize);
        int64_t timestamp = 0;
        AImage_getTimestamp(image, &timestamp);
        AMediaCodec_queueInputBuffer(mediaCodec, inputBufferIdx, 0, bufferSize, timestamp, 0);
    }
    AMediaCodecBufferInfo bufferInfo;
    int idx = AMediaCodec_dequeueOutputBuffer(mediaCodec, &bufferInfo, -1);
    LOGI("dequeue output buffer idx: %d", idx);
    if (idx == AMEDIACODEC_INFO_OUTPUT_FORMAT_CHANGED) {
        AMediaFormat *format = AMediaCodec_getOutputFormat(mediaCodec);
        tidx = AMediaMuxer_addTrack(mediaMuxer, format);
        LOGI("added track tidx: %d (format: %s)", tidx, AMediaFormat_toString(format));
        AMediaMuxer_start(mediaMuxer);
        AMediaFormat_delete(format);
    } else if (idx >= 0) {
        uint8_t *outputBuffer = AMediaCodec_getOutputBuffer(mediaCodec, idx, &bufferSize);
        if (tidx >= 0 && bufferInfo.size > 0) {
            pthread_mutex_lock(&lock);
            LOGI("sending buffer to tidx: %d ptr: %p (info->offset: %d, info->size: %d)", tidx, outputBuffer, bufferInfo.offset, bufferInfo.size);
            AMediaMuxer_writeSampleData(mediaMuxer, tidx, outputBuffer, &bufferInfo);
            pthread_mutex_unlock(&lock);
        }
        AMediaCodec_releaseOutputBuffer(mediaCodec, idx, false);
    }
    AImage_delete(image);
}
Start recording
JNIEXPORT void JNICALL Java_org_freedesktop_nativecamera2_NativeCamera2_startRecording(JNIEnv *env,
                                                                                        jclass clazz,
                                                                                        int fd,
                                                                                        jobject surface) {
    int32_t width, height;
    media_status_t media_status;
    pthread_mutex_init(&lock, NULL);
    theNativeWindow = ANativeWindow_fromSurface(env, surface);
    mediaMuxer = AMediaMuxer_new(fd, AMEDIAMUXER_OUTPUT_FORMAT_MPEG_4);
    width = 640;  //ANativeWindow_getWidth(theNativeWindow);
    height = 480; //ANativeWindow_getHeight(theNativeWindow);
    LOGI("Setting ImageReader (w: %d, h: %d)\n", width, height);
    AImageReader_new(width, height,
                     AIMAGE_FORMAT_YUV_420_888,
                     5, /* maxImages */
                     &imageReader);
    media_status = AImageReader_getWindow(imageReader, &imageReaderWindow);
    if (media_status != AMEDIA_OK) {
        LOGE("Failed to get a native window from image reader.");
        return;
    }
    imageReaderListener.onImageAvailable = image_reader_on_image_available;
    AImageReader_setImageListener(imageReader, &imageReaderListener);
    mediaCodec = AMediaCodec_createEncoderByType("video/avc");
    AMediaFormat *mediaFormat = AMediaFormat_new();
    /** These are mandatory to configure mediacodec. */
    AMediaFormat_setString(mediaFormat, AMEDIAFORMAT_KEY_MIME, "video/avc");
    //AMediaFormat_setInt32(mediaFormat, AMEDIAFORMAT_KEY_BIT_RATE, 125000);
    AMediaFormat_setFloat(mediaFormat, AMEDIAFORMAT_KEY_FRAME_RATE, 15.0);
    AMediaFormat_setInt32(mediaFormat, AMEDIAFORMAT_KEY_I_FRAME_INTERVAL, 5);
    AMediaFormat_setInt32(mediaFormat, AMEDIAFORMAT_KEY_WIDTH, width);
    AMediaFormat_setInt32(mediaFormat, AMEDIAFORMAT_KEY_HEIGHT, height);
    AMediaFormat_setInt32(mediaFormat, AMEDIAFORMAT_KEY_COLOR_FORMAT, 0x7f420888 /*COLOR_FormatYUV420Flexible*/);
    media_status = AMediaCodec_configure(mediaCodec, mediaFormat, NULL, NULL, AMEDIACODEC_CONFIGURE_FLAG_ENCODE);
    if (media_status != AMEDIA_OK) {
        LOGE("Failed to configure media codec (return code: %x)", media_status);
    }
    AMediaFormat_delete(mediaFormat);
    openCamera(TEMPLATE_RECORD);
    ACameraOutputTarget_create(theNativeWindow, &cameraOutputTarget);
    ACaptureRequest_addTarget(captureRequest, cameraOutputTarget);
    ACameraOutputTarget_create(imageReaderWindow, &imageReaderOutputTarget);
    ACaptureRequest_addTarget(captureRequest, imageReaderOutputTarget);
    ACaptureSessionOutput_create(theNativeWindow, &sessionOutput);
    ACaptureSessionOutputContainer_add(captureSessionOutputContainer, sessionOutput);
    ACaptureSessionOutput_create(imageReaderWindow, &imageReaderSessionOutput);
    ACaptureSessionOutputContainer_add(captureSessionOutputContainer, imageReaderSessionOutput);
    ACameraDevice_createCaptureSession(cameraDevice, captureSessionOutputContainer,
                                       &captureSessionStateCallbacks, &captureSession);
    ACameraCaptureSession_setRepeatingRequest(captureSession, NULL, 1, &captureRequest, NULL);
    AMediaCodec_start(mediaCodec);
}
After running, the recorded video is entirely green; that is all I see when I play it back.
I tried different media formats, but I don't know if I'm missing something.
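For what it's worth, one detail in the callback above that may matter: AImage_getPlaneData only overwrites the inputBuffer pointer with a pointer into the AImage's own plane, so nothing is actually copied into the codec's input buffer, and an untouched, zero-filled YUV buffer decodes as a dull green, which would match the symptom. A plane-by-plane copy that honors the row and pixel strides would look roughly like the sketch below (copy_yuv420 is a hypothetical helper, and it assumes the encoder accepts a planar I420-style layout, which is not guaranteed for every device):
static size_t copy_yuv420(AImage *image, uint8_t *dst, size_t dstCapacity) {
    int32_t width = 0, height = 0;
    AImage_getWidth(image, &width);
    AImage_getHeight(image, &height);
    size_t written = 0;
    for (int plane = 0; plane < 3; plane++) {
        uint8_t *src = NULL;
        int srcLen = 0;
        int32_t rowStride = 0, pixStride = 0;
        AImage_getPlaneData(image, plane, &src, &srcLen);
        AImage_getPlaneRowStride(image, plane, &rowStride);
        AImage_getPlanePixelStride(image, plane, &pixStride);
        /* Plane 0 is Y at full resolution; planes 1 and 2 (U, V) are half size. */
        int32_t w = (plane == 0) ? width : width / 2;
        int32_t h = (plane == 0) ? height : height / 2;
        for (int32_t row = 0; row < h; row++) {
            for (int32_t col = 0; col < w; col++) {
                if (written < dstCapacity)
                    dst[written++] = src[row * rowStride + col * pixStride];
            }
        }
    }
    return written; /* number of bytes actually placed in the codec buffer */
}
In the callback it would be called with the inputBuffer and bufferSize obtained from AMediaCodec_getInputBuffer, and its return value passed as the size to AMediaCodec_queueInputBuffer instead of bufferSize.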
I'm developing an Android app where I use cameraCaptureSession to grab camera frames and send them to a native routine that uses OpenCV for license plate recognition.
I need to work at full resolution (5312x2988) because I have to detect distant objects, but that is too slow, so I resize the frame to detect a larger object first (the car), and inside that area I then detect the license plate at high resolution. This is a little better, but still not enough.
I think the problem is when I write the matrix to the surface, but I don't know how to do it differently.
Here is my code:
JNIEXPORT void JNICALL Java_com_studio_myproject_activity_JNIUtils_nativeDetectPlateNumber
(JNIEnv * jenv, jobject obj, jint srcWidth, jint srcHeight, jobject srcBuffer, jobject dstSurface, jfloat focalLenght, jfloat sensorHeight)
{
if(classifierCar.empty() || classifierPlate.empty()) {
LOGD("Nessun classifier รจ stato caricato");
return;
}
uint8_t *srcLumaPtr = reinterpret_cast<uint8_t *>(jenv->GetDirectBufferAddress(srcBuffer));
int dstWidth;
int dstHeight;
cv::Mat mYuv(srcHeight + srcHeight / 2, srcWidth, CV_8UC1, srcLumaPtr);
ANativeWindow *win = ANativeWindow_fromSurface(jenv, dstSurface);
ANativeWindow_acquire(win);
ANativeWindow_Buffer buf;
dstWidth = srcWidth;
dstHeight = srcHeight;
ANativeWindow_setBuffersGeometry(win, dstWidth, dstHeight, 0 /*format unchanged*/);
if (int32_t err = ANativeWindow_lock(win, &buf, NULL)) {
LOGD("ANativeWindow_lock failed with error code %d\n", err);
ANativeWindow_release(win);
return;
}
uint8_t *dstLumaPtr = reinterpret_cast<uint8_t *>(buf.bits);
Mat dstRgba(dstHeight, buf.stride, CV_8UC4,
dstLumaPtr); // TextureView buffer, use stride as width
Mat srcRgba(srcHeight, srcWidth, CV_8UC4);
Mat mGray(dstHeight, dstWidth, CV_8UC4);
// convert YUV -> RGBA
cv::cvtColor(mYuv, srcRgba, CV_YUV2RGBA_NV21);
cv::cvtColor(srcRgba, mGray, COLOR_RGBA2GRAY);
// shrink the image
cv::Mat mResized;
cv::resize(mGray, mResized, cvSize(0,0), 0.12, 0.12);
vector<Rect> cars;
classifierCar.detectMultiScale(mResized, cars, 1.1, 3, 2, Size(20, 20), Size(640, 640));
vector<Rect> plates;
cv::Mat croppedImage;
for (int i=0; i< cars.size(); i++){
int newLeftCar = srcWidth*cars[i].tl().x/(mResized.cols);
int newTopCar = srcHeight*cars[i].tl().y/(mResized.rows);
int newRightCar = srcWidth*cars[i].br().x/(mResized.cols);
int newBottomCar = srcHeight*cars[i].br().y/(mResized.rows);
Point newTlCar = Point(newLeftCar, newTopCar);
Point newBrCar = Point(newRightCar, newBottomCar);
rectangle(srcRgba,newTlCar, newBrCar, Scalar(0, 255, 0, 255),3);
croppedImage = srcRgba(Rect(newLeftCar, newTopCar, abs(newRightCar-newLeftCar), abs(newBottomCar-newTopCar)));
classifierPlate.detectMultiScale(croppedImage, plates, 1.1, 3, 2, Size(100, 50), Size(640, 360));
for(int j=0; j< plates.size(); j++){
int newLeftPlate = plates[j].tl().x + newLeftCar;
int newTopPlate = plates[j].tl().y + newTopCar;
int newRightPlate = plates[j].br().x + newLeftCar;
int newBottomPlate = plates[j].br().y + newTopCar;
Point newTlPlate = Point(newLeftPlate, newTopPlate);
Point newBrPlate = Point(newRightPlate, newBottomPlate);
rectangle(srcRgba, newTlPlate, newBrPlate, Scalar (255, 0, 0, 255 ), 3);
}
}
// copy to TextureView surface
uchar *dbuf;
uchar *sbuf;
dbuf = dstRgba.data;
sbuf = srcRgba.data;
int i;
for (i = 0; i < srcRgba.rows; i++) {
dbuf = dstRgba.data + i * buf.stride * 4;
memcpy(dbuf, sbuf, srcRgba.cols * 4);
sbuf += srcRgba.cols * 4;
}
ANativeWindow_unlockAndPost(win);
ANativeWindow_release(win);
}
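A side note on the speed issue, under the assumption that most of the time goes into the full-resolution YUV→RGBA and RGBA→gray conversions rather than the surface copy: the cascade classifiers only need a grayscale image, and the luma plane of the NV21 buffer already is one, so the detection pass can run on it directly while the RGBA conversion is kept only for drawing. A sketch reusing the variables from the function above:
// Run detection on the luma plane (already grayscale) instead of converting
// the whole frame to RGBA first; srcLumaPtr/srcWidth/srcHeight are the same
// variables as above.
cv::Mat yPlane(srcHeight, srcWidth, CV_8UC1, srcLumaPtr);   // wraps the buffer, no copy
cv::Mat smallGray;
cv::resize(yPlane, smallGray, cv::Size(), 0.12, 0.12);      // same scale factor as above
std::vector<cv::Rect> cars;
classifierCar.detectMultiScale(smallGray, cars, 1.1, 3, 2,
                               cv::Size(20, 20), cv::Size(640, 640));
// The rectangles are then mapped back to full resolution exactly as in the
// existing loop, and the plate cascade can run on a CV_8UC1 crop of yPlane.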
I have an app that converts images to video. In Google Play I see the following crash report; the only detail I can make out is the name of the function, and I don't understand the rest:
backtrace:
#00 pc 0000cc78 /data/app-lib/com.myapp-1/libswscale.so (sws_scale+204)
#01 pc 000012af /data/app-lib/com.myapp-1/libffmpeg.so (OpenImage+322)
code around pc:
79065c58 e58d8068 e58d2070 e58d3074 059d00b0
The backtrace points to the function sws_scale. The code works almost all of the time on my device (a Nexus 5), but I see a lot of reports of this issue, even from the same device model. Any idea why this could happen?
AVFrame* OpenImage(const char* imageFileName, int W_VIDEO, int H_VIDEO, int* numBytes)
{
AVFormatContext *pFormatCtx;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVFrame *pFrame;
int frameFinished;
uint8_t *buffer;
AVPacket packet;
int srcBytes;
AVFrame* frame2 = NULL;// scaled frame
uint8_t* frame2_buffer;
struct SwsContext *resize;
if(av_open_input_file(&pFormatCtx, imageFileName, NULL, 0, NULL)!=0)
{
LOGI("Can't open image file '%s'\n", imageFileName);
return NULL;
}
//dump_format(pFormatCtx, 0, imageFileName, 0);
if (av_find_stream_info(pFormatCtx) < 0)
{
LOGI("Can't find stream info.");
return NULL;
}
pCodecCtx = pFormatCtx->streams[0]->codec;
pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
// Find the decoder for the video stream
pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (!pCodec)
{
LOGI("Codec not found\n");
return NULL;
}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
{
LOGI("Could not open codec\n");
return NULL;
}
pFrame = avcodec_alloc_frame();
if (!pFrame)
{
LOGI("Can't allocate memory for AVFrame\n");
return NULL;
}
// Determine required buffer size and allocate buffer
srcBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
buffer = (uint8_t *) av_malloc(srcBytes * sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrame, buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
// Read frame
if (av_read_frame(pFormatCtx, &packet) >= 0)
{
int ret;
// if(packet.stream_index != 0)
// continue;
ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if (ret > 0)
{
//LOGI("Frame is decoded, size %d\n", ret);
pFrame->quality = 4;
// Create another frame for resized result
frame2 = avcodec_alloc_frame();
*numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
frame2_buffer = (uint8_t *)av_malloc(*numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture*)frame2, frame2_buffer, PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
// Get resize context
resize = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, W_VIDEO, H_VIDEO, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
// frame2 should be filled with resized samples
ret = sws_scale(resize, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, frame2->data, frame2->linesize);
sws_freeContext(resize);
}
else
LOGI("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
}
av_free(pFrame);
av_free_packet(&packet);
avcodec_close(pCodecCtx);
//av_free(pCodecCtx);
av_close_input_file(pFormatCtx);
return frame2;
}
After your avcodec_decode_video2, do not check ret alone; you need to check frameFinished too. If frameFinished == 0, your frame must not be used (because it was not filled). I do not know about still images, but when you decode video this happens very often: you then need to read the next packet and give it to the next call of avcodec_decode_video2.
On a side note: why are you forcing pCodecCtx->pix_fmt = PIX_FMT_YUV420P? It is set to the correct format automatically by av_find_stream_info, and you should use that value as the sws_getContext parameter.
Last thing: there is no need to fill your pFrame with avpicture_fill. You only need to av_frame_alloc() it, and avcodec_decode_video2 will take care of filling it.
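To make the first point concrete, the read/decode step could look roughly like this (a sketch against the question's variables and the same legacy API, with minimal error handling):
int frameFinished = 0;
AVPacket packet;
while (!frameFinished && av_read_frame(pFormatCtx, &packet) >= 0)
{
    int ret = 0;
    if (packet.stream_index == 0)
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    av_free_packet(&packet);
    if (ret < 0)
    {
        LOGI("Error while decoding frame\n");
        break;
    }
    /* If frameFinished is still 0, pFrame is not filled yet:
       loop around and feed the next packet. */
}
if (frameFinished)
{
    /* pFrame now holds a complete decoded picture; it is safe to sws_scale it. */
}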
After this line
*numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
You have not checked the return value.
The avpicture_get_size documentation says:
"Returns:
the computed picture buffer size or a negative error code in case of error"
If you check the *numBytes value (and likewise srcBytes, buffer, and frame2_buffer), things may go better...
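For example (a sketch against the question's variables):
*numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
if (*numBytes < 0)
{
    LOGI("avpicture_get_size failed: %d\n", *numBytes);
    return NULL;
}
frame2_buffer = (uint8_t *)av_malloc(*numBytes);
if (frame2_buffer == NULL)
{
    LOGI("Could not allocate %d bytes\n", *numBytes);
    return NULL;
}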
I have a bunch of local images saved as JPEG files. My images are captured using CameraPreview, and the PreviewFormat is the default, NV21. I want to generate a small video from a fixed number of images.
I am not going to use FFmpeg because it requires the NDK and would introduce compatibility issues.
MediaCodec and MediaMuxer seem to work, but there isn't a single working solution on the web.
A few references led to my current solution.
1. EncodeAndMuxTest: http://bigflake.com/mediacodec/EncodeAndMuxTest.java.txt
This one is written by fadden. It suits my needs quite well, except that it uses createInputSurface rather than queueInputBuffer.
2. Convert bitmap array to YUV (YCbCr NV21)
I do the conversion following this answer: https://stackoverflow.com/a/17116985/3047840
3. Using MediaCodec to save a series of images as a video
This question looks very similar to mine, but it doesn't bother with MediaMuxer.
My code is the following:
public class EncodeAndMux extends Activity {
private static final String TAG = "EncodeAndMuxTest";
private static final boolean VERBOSE = false;
private static final File OUTPUT_DIR = Environment
.getExternalStorageDirectory();
private static final String MIME_TYPE = "video/avc";
private static final int FRAME_RATE = 10;
// 10 seconds between I-frames
private static final int IFRAME_INTERVAL = 10;
private static final int NUM_FRAMES = 5;
private static final String DEBUG_FILE_NAME_BASE = "/sdcard/test";
// two seconds of video
// size of a frame, in pixels
private int mWidth = -1;
private int mHeight = -1;
// bit rate, in bits per second
private int mBitRate = -1;
private byte[] mFrame;
// largest color component delta seen (i.e. actual vs. expected)
private int mLargestColorDelta;
// encoder / muxer state
private MediaCodec mEncoder;
private MediaMuxer mMuxer;
private int mTrackIndex;
private boolean mMuxerStarted;
private Utils mUtils;
private float mPadding;
private int mColumnWidth;
private static final int TEST_Y = 120; // YUV values for colored rect
private static final int TEST_U = 160;
private static final int TEST_V = 200;
private static final int TEST_R0 = 0; // RGB equivalent of {0,0,0}
private static final int TEST_G0 = 136;
private static final int TEST_B0 = 0;
private static final int TEST_R1 = 236; // RGB equivalent of {120,160,200}
private static final int TEST_G1 = 50;
private static final int TEST_B1 = 186;
private static final boolean DEBUG_SAVE_FILE = false; // save copy of
// encoded movie
// allocate one of these up front so we don't need to do it every time
private MediaCodec.BufferInfo mBufferInfo;
private ArrayList<String> mImagePaths = new ArrayList<String>();
byte[] getNV21(int inputWidth, int inputHeight, Bitmap scaled) {
int[] argb = new int[inputWidth * inputHeight];
scaled.getPixels(argb, 0, inputWidth, 0, 0, inputWidth, inputHeight);
byte[] yuv = new byte[inputWidth * inputHeight * 3 / 2];
encodeYUV420SP(yuv, argb, inputWidth, inputHeight);
scaled.recycle();
return yuv;
}
void encodeYUV420SP(byte[] yuv420sp, int[] argb, int width, int height) {
final int frameSize = width * height;
int yIndex = 0;
int uvIndex = frameSize;
int a, R, G, B, Y, U, V;
int index = 0;
for (int j = 0; j < height; j++) {
for (int i = 0; i < width; i++) {
a = (argb[index] & 0xff000000) >> 24; // a is not used obviously
R = (argb[index] & 0xff0000) >> 16;
G = (argb[index] & 0xff00) >> 8;
B = (argb[index] & 0xff) >> 0;
// well known RGB to YUV algorithm
Y = ((66 * R + 129 * G + 25 * B + 128) >> 8) + 16;
U = ((-38 * R - 74 * G + 112 * B + 128) >> 8) + 128;
V = ((112 * R - 94 * G - 18 * B + 128) >> 8) + 128;
// NV21 has a plane of Y and interleaved planes of VU each
// sampled by a factor of 2
// meaning for every 4 Y pixels there are 1 V and 1 U. Note the
// sampling is every other
// pixel AND every other scanline.
yuv420sp[yIndex++] = (byte) ((Y < 0) ? 0
: ((Y > 255) ? 255 : Y));
if (j % 2 == 0 && index % 2 == 0) {
yuv420sp[uvIndex++] = (byte) ((V < 0) ? 0
: ((V > 255) ? 255 : V));
yuv420sp[uvIndex++] = (byte) ((U < 0) ? 0
: ((U > 255) ? 255 : U));
}
index++;
}
}
}
public static Bitmap decodeFile(String filePath, int WIDTH, int HIGHT) {
try {
File f = new File(filePath);
BitmapFactory.Options o = new BitmapFactory.Options();
o.inJustDecodeBounds = true;
o.inPurgeable = true;
o.inInputShareable = true;
BitmapFactory.decodeStream(new FileInputStream(f), null, o);
final int REQUIRED_WIDTH = WIDTH;
final int REQUIRED_HIGHT = HIGHT;
int scale = 1;
while (o.outWidth / scale / 2 >= REQUIRED_WIDTH
&& o.outHeight / scale / 2 >= REQUIRED_HIGHT)
scale *= 2;
BitmapFactory.Options o2 = new BitmapFactory.Options();
o2.inSampleSize = scale;
o2.inPurgeable = true;
o2.inInputShareable = true;
return BitmapFactory.decodeStream(new FileInputStream(f), null, o2);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
return null;
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_encode_and_mux);
mUtils = new Utils(this);
mImagePaths = mUtils.getBackFilePaths();
mPadding = TypedValue.applyDimension(TypedValue.COMPLEX_UNIT_DIP,
AppConstant.GRID_PADDING, getResources().getDisplayMetrics());
mColumnWidth = (int) ((mUtils.getScreenWidth() - ((AppConstant.NUM_OF_COLUMNS + 1) * mPadding)) / AppConstant.NUM_OF_COLUMNS);
try {
testEncodeDecodeVideoFromBufferToSurface720p();
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
} catch (Throwable e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}
/**
* Returns the first codec capable of encoding the specified MIME type, or null if no
* match was found.
*/
private static MediaCodecInfo selectCodec(String mimeType) {
int numCodecs = MediaCodecList.getCodecCount();
for (int i = 0; i < numCodecs; i++) {
MediaCodecInfo codecInfo = MediaCodecList.getCodecInfoAt(i);
if (!codecInfo.isEncoder()) {
continue;
}
String[] types = codecInfo.getSupportedTypes();
for (int j = 0; j < types.length; j++) {
if (types[j].equalsIgnoreCase(mimeType)) {
return codecInfo;
}
}
}
return null;
}
/**
* Returns a color format that is supported by the codec and by this test code. If no
* match is found, this throws a test failure -- the set of formats known to the test
* should be expanded for new platforms.
*/
private static int selectColorFormat(MediaCodecInfo codecInfo, String mimeType) {
MediaCodecInfo.CodecCapabilities capabilities = codecInfo.getCapabilitiesForType(mimeType);
for (int i = 0; i < capabilities.colorFormats.length; i++) {
int colorFormat = capabilities.colorFormats[i];
if (isRecognizedFormat(colorFormat)) {
return colorFormat;
}
}
Log.e("","couldn't find a good color format for " + codecInfo.getName() + " / " + mimeType);
return 0; // not reached
}
/**
* Returns true if this is a color format that this test code understands (i.e. we know how
* to read and generate frames in this format).
*/
private static boolean isRecognizedFormat(int colorFormat) {
switch (colorFormat) {
// these are the formats we know how to handle for this test
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedSemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_TI_FormatYUV420PackedSemiPlanar:
return true;
default:
return false;
}
}
/**
* Returns true if the specified color format is semi-planar YUV. Throws an exception
* if the color format is not recognized (e.g. not YUV).
*/
private static boolean isSemiPlanarYUV(int colorFormat) {
switch (colorFormat) {
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420Planar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedPlanar:
return false;
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420SemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_FormatYUV420PackedSemiPlanar:
case MediaCodecInfo.CodecCapabilities.COLOR_TI_FormatYUV420PackedSemiPlanar:
return true;
default:
throw new RuntimeException("unknown format " + colorFormat);
}
}
/**
* Does the actual work for encoding frames from buffers of byte[].
*/
private void doEncodeDecodeVideoFromBuffer(MediaCodec encoder, int encoderColorFormat,
MediaCodec decoder, boolean toSurface) {
final int TIMEOUT_USEC = 10000;
ByteBuffer[] encoderInputBuffers = encoder.getInputBuffers();
ByteBuffer[] encoderOutputBuffers = encoder.getOutputBuffers();
ByteBuffer[] decoderInputBuffers = null;
ByteBuffer[] decoderOutputBuffers = null;
MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
MediaFormat decoderOutputFormat = null;
int generateIndex = 0;
int checkIndex = 0;
int badFrames = 0;
boolean decoderConfigured = false;
OutputSurface outputSurface = null;
// The size of a frame of video data, in the formats we handle, is stride*sliceHeight
// for Y, and (stride/2)*(sliceHeight/2) for each of the Cb and Cr channels. Application
// of algebra and assuming that stride==width and sliceHeight==height yields:
// Just out of curiosity.
long rawSize = 0;
long encodedSize = 0;
// Save a copy to disk. Useful for debugging the test. Note this is a raw elementary
// stream, not a .mp4 file, so not all players will know what to do with it.
if (toSurface) {
outputSurface = new OutputSurface(mWidth, mHeight);
}
// Loop until the output side is done.
boolean inputDone = false;
boolean encoderDone = false;
boolean outputDone = false;
while (!outputDone) {
Log.e(TAG, "loop");
// If we're not done submitting frames, generate a new one and submit it. By
// doing this on every loop we're working to ensure that the encoder always has
// work to do.
//
// We don't really want a timeout here, but sometimes there's a delay opening
// the encoder device, so a short timeout can keep us from spinning hard.
if (!inputDone) {
int inputBufIndex = encoder.dequeueInputBuffer(TIMEOUT_USEC);
Log.e(TAG, "inputBufIndex=" + inputBufIndex);
if (inputBufIndex >= 0) {
long ptsUsec = computePresentationTime(generateIndex);
if (generateIndex == NUM_FRAMES) {
// Send an empty frame with the end-of-stream flag set. If we set EOS
// on a frame with data, that frame data will be ignored, and the
// output will be short one frame.
encoder.queueInputBuffer(inputBufIndex, 0, 0, ptsUsec,
MediaCodec.BUFFER_FLAG_END_OF_STREAM);
inputDone = true;
Log.e(TAG, "sent input EOS (with zero-length frame)");
} else {
generateFrame(generateIndex, encoderColorFormat, mFrame);
//generateFrame(generateIndex);
ByteBuffer inputBuf = encoderInputBuffers[inputBufIndex];
// the buffer should be sized to hold one full frame
inputBuf.clear();
inputBuf.put(mFrame);
encoder.queueInputBuffer(inputBufIndex, 0, mFrame.length, ptsUsec, 0);
Log.e(TAG, "submitted frame " + generateIndex + " to enc");
}
generateIndex++;
} else {
// either all in use, or we timed out during initial setup
Log.e(TAG, "input buffer not available");
}
}
// Check for output from the encoder. If there's no output yet, we either need to
// provide more input, or we need to wait for the encoder to work its magic. We
// can't actually tell which is the case, so if we can't get an output buffer right
// away we loop around and see if it wants more input.
//
// Once we get EOS from the encoder, we don't need to do this anymore.
if (!encoderDone) {
int encoderStatus = encoder.dequeueOutputBuffer(info, TIMEOUT_USEC);
if (encoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet
Log.e(TAG, "no output from encoder available");
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// not expected for an encoder
encoderOutputBuffers = encoder.getOutputBuffers();
Log.e(TAG, "encoder output buffers changed");
} else if (encoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// not expected for an encoder
if (mMuxerStarted) {
throw new RuntimeException("format changed twice");
}
MediaFormat newFormat = encoder.getOutputFormat();
Log.e(TAG, "encoder output format changed: " + newFormat);
// now that we have the Magic Goodies, start the muxer
mTrackIndex = mMuxer.addTrack(newFormat);
Log.e(TAG, "muxer defined muxer format: " + newFormat);
mMuxer.start();
mMuxerStarted = true;
} else if (encoderStatus < 0) {
Log.e("","unexpected result from encoder.dequeueOutputBuffer: " + encoderStatus);
} else { // encoderStatus >= 0
ByteBuffer encodedData = encoderOutputBuffers[encoderStatus];
if (encodedData == null) {
Log.e("","encoderOutputBuffer " + encoderStatus + " was null");
}
// It's usually necessary to adjust the ByteBuffer values to match BufferInfo.
encodedData.position(info.offset);
encodedData.limit(info.offset + info.size);
encodedSize += info.size;
if ((info.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != 0) {
// Codec config info. Only expected on first packet. One way to
// handle this is to manually stuff the data into the MediaFormat
// and pass that to configure(). We do that here to exercise the API.
MediaFormat format =
MediaFormat.createVideoFormat(MIME_TYPE, mWidth, mHeight);
format.setByteBuffer("csd-0", encodedData);
decoder.configure(format, toSurface ? outputSurface.getSurface() : null,
null, 0);
decoder.start();
decoderInputBuffers = decoder.getInputBuffers();
decoderOutputBuffers = decoder.getOutputBuffers();
decoderConfigured = true;
Log.e(TAG, "decoder configured (" + info.size + " bytes)"+format);
} else {
// Get a decoder input buffer, blocking until it's available.
int inputBufIndex = decoder.dequeueInputBuffer(-1);
ByteBuffer inputBuf = decoderInputBuffers[inputBufIndex];
inputBuf.clear();
inputBuf.put(encodedData);
decoder.queueInputBuffer(inputBufIndex, 0, info.size,
info.presentationTimeUs, info.flags);
encoderDone = (info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0;
Log.e(TAG, "passed " + info.size + " bytes to decoder"
+ (encoderDone ? " (EOS)" : ""));
Log.e("encoderDone",encoderDone+"");
}
encoder.releaseOutputBuffer(encoderStatus, false);
}
}
// Check for output from the decoder. We want to do this on every loop to avoid
// the possibility of stalling the pipeline. We use a short timeout to avoid
// burning CPU if the decoder is hard at work but the next frame isn't quite ready.
//
// If we're decoding to a Surface, we'll get notified here as usual but the
// ByteBuffer references will be null. The data is sent to Surface instead.
if (decoderConfigured) {
int decoderStatus = decoder.dequeueOutputBuffer(info, 3*TIMEOUT_USEC);
if (decoderStatus == MediaCodec.INFO_TRY_AGAIN_LATER) {
// no output available yet
Log.e(TAG, "no output from decoder available");
} else if (decoderStatus == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
// The storage associated with the direct ByteBuffer may already be unmapped,
// so attempting to access data through the old output buffer array could
// lead to a native crash.
Log.e(TAG, "decoder output buffers changed");
decoderOutputBuffers = decoder.getOutputBuffers();
} else if (decoderStatus == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// this happens before the first frame is returned
decoderOutputFormat = decoder.getOutputFormat();
Log.e(TAG, "decoder output format changed: " +
decoderOutputFormat);
} else if (decoderStatus < 0) {
Log.e(TAG, "unexpected result from deocder.dequeueOutputBuffer: " + decoderStatus);
} else { // decoderStatus >= 0
if (!toSurface) {
ByteBuffer outputFrame = decoderOutputBuffers[decoderStatus];
outputFrame.position(info.offset);
outputFrame.limit(info.offset + info.size);
mMuxer.writeSampleData(mTrackIndex, outputFrame,
info);
rawSize += info.size;
if (info.size == 0) {
Log.e(TAG, "got empty frame");
} else {
Log.e(TAG, "decoded, checking frame " + checkIndex);
if (!checkFrame(checkIndex++, decoderOutputFormat, outputFrame)) {
badFrames++;
}
}
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
Log.e(TAG, "output EOS");
outputDone = true;
}
decoder.releaseOutputBuffer(decoderStatus, false /*render*/);
} else {
Log.e(TAG, "surface decoder given buffer " + decoderStatus +
" (size=" + info.size + ")");
rawSize += info.size;
if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
Log.e(TAG, "output EOS");
outputDone = true;
}
boolean doRender = (info.size != 0);
// As soon as we call releaseOutputBuffer, the buffer will be forwarded
// to SurfaceTexture to convert to a texture. The API doesn't guarantee
// that the texture will be available before the call returns, so we
// need to wait for the onFrameAvailable callback to fire.
decoder.releaseOutputBuffer(decoderStatus, doRender);
if (doRender) {
Log.e(TAG, "awaiting frame " + checkIndex);
outputSurface.awaitNewImage();
outputSurface.drawImage();
if (!checkSurfaceFrame(checkIndex++)) {
badFrames++;
}
}
}
}
}
}
Log.e(TAG, "decoded " + checkIndex + " frames at "
+ mWidth + "x" + mHeight + ": raw=" + rawSize + ", enc=" + encodedSize);
if (outputSurface != null) {
outputSurface.release();
}
if (checkIndex != NUM_FRAMES) {
Log.e(TAG, "awaiting frame " + checkIndex);
}
if (badFrames != 0) {
Log.e(TAG, "Found " + badFrames + " bad frames");
}
}
private void generateFrame(int frameIndex) {
Bitmap bitmap = decodeFile(mImagePaths.get(frameIndex), mColumnWidth,
mColumnWidth);
mFrame = getNV21(bitmap.getWidth(), bitmap.getHeight(), bitmap);
}
/**
* Generates data for frame N into the supplied buffer. We have an 8-frame animation
* sequence that wraps around. It looks like this:
* <pre>
* 0 1 2 3
* 7 6 5 4
* </pre>
* We draw one of the eight rectangles and leave the rest set to the zero-fill color.
*/
private void generateFrame(int frameIndex, int colorFormat, byte[] mFrame) {
final int HALF_WIDTH = mWidth / 2;
boolean semiPlanar = isSemiPlanarYUV(colorFormat);
// Set to zero. In YUV this is a dull green.
Arrays.fill(mFrame, (byte) 0);
int startX, startY, countX, countY;
frameIndex %= 8;
//frameIndex = (frameIndex / 8) % 8; // use this instead for debug -- easier to see
if (frameIndex < 4) {
startX = frameIndex * (mWidth / 4);
startY = 0;
} else {
startX = (7 - frameIndex) * (mWidth / 4);
startY = mHeight / 2;
}
for (int y = startY + (mHeight/2) - 1; y >= startY; --y) {
for (int x = startX + (mWidth/4) - 1; x >= startX; --x) {
if (semiPlanar) {
// full-size Y, followed by UV pairs at half resolution
// e.g. Nexus 4 OMX.qcom.video.encoder.avc COLOR_FormatYUV420SemiPlanar
// e.g. Galaxy Nexus OMX.TI.DUCATI1.VIDEO.H264E
// OMX_TI_COLOR_FormatYUV420PackedSemiPlanar
mFrame[y * mWidth + x] = (byte) TEST_Y;
if ((x & 0x01) == 0 && (y & 0x01) == 0) {
mFrame[mWidth*mHeight + y * HALF_WIDTH + x] = (byte) TEST_U;
mFrame[mWidth*mHeight + y * HALF_WIDTH + x + 1] = (byte) TEST_V;
}
} else {
// full-size Y, followed by quarter-size U and quarter-size V
// e.g. Nexus 10 OMX.Exynos.AVC.Encoder COLOR_FormatYUV420Planar
// e.g. Nexus 7 OMX.Nvidia.h264.encoder COLOR_FormatYUV420Planar
mFrame[y * mWidth + x] = (byte) TEST_Y;
if ((x & 0x01) == 0 && (y & 0x01) == 0) {
mFrame[mWidth*mHeight + (y/2) * HALF_WIDTH + (x/2)] = (byte) TEST_U;
mFrame[mWidth*mHeight + HALF_WIDTH * (mHeight / 2) +
(y/2) * HALF_WIDTH + (x/2)] = (byte) TEST_V;
}
}
}
}
}
/**
* Sets the desired frame size and bit rate.
*/
private void setParameters(int width, int height, int bitRate) {
if ((width % 16) != 0 || (height % 16) != 0) {
Log.w(TAG, "WARNING: width or height not multiple of 16");
}
mWidth = width;
mHeight = height;
mBitRate = bitRate;
mFrame = new byte[mWidth * mHeight * 3 / 2];
}
public void testEncodeDecodeVideoFromBufferToSurface720p() throws Throwable {
setParameters(1280, 720, 6000000);
encodeDecodeVideoFromBuffer(false);
}
}
Logcat:
12-17 18:25:47.405: E/EncodeAndMuxTest(16415): found codec: OMX.qcom.video.encoder.avc
12-17 18:25:47.405: I/OMXClient(16415): Using client-side OMX mux.
12-17 18:25:47.455: E/EncodeAndMuxTest(16415): found colorFormat: 21
12-17 18:25:47.455: E/EncodeAndMuxTest(16415): format: {frame-rate=10, bitrate=6000000, height=720, mime=video/avc, color-format=21, i-frame-interval=10, width=1280}
12-17 18:25:47.465: I/OMXClient(16415): Using client-side OMX mux.
12-17 18:25:47.495: E/ACodec(16415): [OMX.qcom.video.encoder.avc] storeMetaDataInBuffers (output) failed w/ err -2147483648
12-17 18:25:47.495: I/ACodec(16415): setupVideoEncoder succeeded
12-17 18:25:47.535: I/OMXClient(16415): Using client-side OMX mux.
12-17 18:25:47.545: E/EncodeAndMuxTest(16415): loop
12-17 18:25:47.545: E/EncodeAndMuxTest(16415): inputBufIndex=0
12-17 18:25:47.655: E/EncodeAndMuxTest(16415): submitted frame 0 to enc
12-17 18:25:47.655: E/EncodeAndMuxTest(16415): encoder output format changed: {csd-1=java.nio.ByteArrayBuffer[position=0,limit=8,capacity=8], height=720, mime=video/avc, csd-0=java.nio.ByteArrayBuffer[position=0,limit=18,capacity=18], what=1869968451, width=1280}
12-17 18:25:47.655: E/EncodeAndMuxTest(16415): muxer defined muxer format: {csd-1=java.nio.ByteArrayBuffer[position=0,limit=8,capacity=8], height=720, mime=video/avc, csd-0=java.nio.ByteArrayBuffer[position=0,limit=18,capacity=18], what=1869968451, width=1280}
12-17 18:25:47.655: I/MPEG4Writer(16415): limits: 2147483647/0 bytes/us, bit rate: -1 bps and the estimated moov size 3072 bytes
12-17 18:25:47.655: E/EncodeAndMuxTest(16415): inputBufIndex=2
12-17 18:25:47.795: E/EncodeAndMuxTest(16415): submitted frame 1 to enc
12-17 18:25:47.825: E/EncodeAndMuxTest(16415): decoder configured (26 bytes){csd-0=java.nio.DirectByteBuffer[position=0,limit=26,capacity=692224], height=720, width=1280, mime=video/avc}
12-17 18:25:47.855: E/EncodeAndMuxTest(16415): no output from decoder available
12-17 18:25:47.855: E/EncodeAndMuxTest(16415): inputBufIndex=0
12-17 18:25:47.976: E/EncodeAndMuxTest(16415): submitted frame 2 to enc
12-17 18:25:48.136: E/EncodeAndMuxTest(16415): passed 3188 bytes to decoder
12-17 18:25:48.176: E/EncodeAndMuxTest(16415): no output from decoder available
12-17 18:25:48.176: E/EncodeAndMuxTest(16415): inputBufIndex=1
12-17 18:25:48.296: E/EncodeAndMuxTest(16415): submitted frame 3 to enc
12-17 18:25:48.296: E/EncodeAndMuxTest(16415): passed 1249 bytes to decoder
12-17 18:25:48.326: E/EncodeAndMuxTest(16415): no output from decoder available
12-17 18:25:48.326: E/EncodeAndMuxTest(16415): loop
12-17 18:25:48.326: E/EncodeAndMuxTest(16415): inputBufIndex=2
12-17 18:25:48.396: E/EncodeAndMuxTest(16415): submitted frame 4 to enc
12-17 18:25:48.396: E/EncodeAndMuxTest(16415): passed 3085 bytes to decoder
12-17 18:25:48.436: E/EncodeAndMuxTest(16415): no output from decoder available
12-17 18:25:48.436: E/EncodeAndMuxTest(16415): inputBufIndex=0
12-17 18:25:48.436: E/EncodeAndMuxTest(16415): sent input EOS (with zero-length frame)
12-17 18:25:48.436: E/EncodeAndMuxTest(16415): passed 3056 bytes to decoder
12-17 18:25:48.466: E/EncodeAndMuxTest(16415): no output from decoder available
12-17 18:25:48.466: E/EncodeAndMuxTest(16415): passed 1085 bytes to decoder (EOS)
12-17 18:25:48.476: E/EncodeAndMuxTest(16415): decoder output buffers changed
12-17 18:25:48.496: E/EncodeAndMuxTest(16415): decoder output format changed:
Reading the JPEGs, decompressing them, and then recompressing them is going to cause a loss of image quality (and take CPU effort and time); simply appending them together and putting them in a video container will be faster and produce a better video.
The MJPEG video format is quite old, so (almost) any program can play MJPEG videos.
I suggest a solution similar to this one: http://sourceforge.net/projects/jpegtoavi/ i.e. make an MJPEG movie from your JPEGs. There is more than one program to choose from; use a search engine (or the site's search bar) to find more source code.
I tested whether my phone can handle MJPEG by creating a file with this command:
ffmpeg.exe -i test_in.mp4 -vcodec mjpeg -acodec copy test_out.mp4
In: Stream #0:0(und): Video: h264 (Main) (avc1 / 0x31637661), yuv420p, 1280x720 [SAR 1:1 DAR 16:9], 1568 kb/s, 29.97 fps, 29.97 tbr, 90k tbn, 59.94 tbc (default)
Out: Stream #0:0(und): Video: mjpeg (l[0][0][0] / 0x006C), yuvj420p, 1280x720 [SAR 1:1 DAR 16:9], q=2-31, 200 kb/s, 30k tbn, 29.97 tbc (default)
Unfortunately the Android Gallery player is one of the programs that does not understand that format, but BSPlayer, VLC, and MPlayer for Android can play it, if you want the resulting video to play on your phone (without writing more code).
I'm trying to encode images to video using the FFmpeg library.
I have these global params:
//Global params
AVCodec *codec;
AVCodecContext *codecCtx;
uint8_t *output_buffer;
int output_buffer_size;
I divided the encoding into 3 methods:
Initialize the encoder:
jint Java_com_camera_simpledoublewebcams2_CameraPreview_initencoder(JNIEnv* env,jobject thiz){
avcodec_register_all();
avcodec_init();
av_register_all();
int fps = 30;
/* find the H263 video encoder */
codec = avcodec_find_encoder(CODEC_ID_H263);
if (!codec) {
LOGI("avcodec_find_encoder() run fail.");
return -5;
}
//allocate context
codecCtx = avcodec_alloc_context();
/* put sample parameters */
codecCtx->bit_rate = 400000;
/* resolution must be a multiple of two */
codecCtx->width = 176;
codecCtx->height = 144;
/* frames per second */
codecCtx->time_base = (AVRational){1,fps};
codecCtx->pix_fmt = PIX_FMT_YUV420P;
codecCtx->codec_id = CODEC_ID_H263;
codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
/* open it */
if (avcodec_open(codecCtx, codec) < 0) {
LOGI("avcodec_open() run fail.");
return -10;
}
//init buffer
output_buffer_size = 500000;
output_buffer = malloc(output_buffer_size);
return 0;
}
Encoding the image:
jint Java_com_camera_simpledoublewebcams2_CameraPreview_encodejpeg(JNIEnv* env,jobject thiz,jchar* cImage, jint imageSize){
int out_size;
AVFrame *picture;
AVFrame *outpic;
uint8_t *outbuffer;
//allocate frame
picture = avcodec_alloc_frame();
outpic = avcodec_alloc_frame();
int nbytes = avpicture_get_size(PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);
outbuffer = (uint8_t*)av_malloc(nbytes);
outpic->pts = 0;
//fill picture with image
avpicture_fill((AVPicture*)picture, (uint8_t*)cImage, PIX_FMT_RGBA, codecCtx->width, codecCtx->height);
//fill outpic with empty image
avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);
//rescale the image
struct SwsContext* fooContext = sws_getContext(codecCtx->width, codecCtx->height,
PIX_FMT_RGBA,
codecCtx->width, codecCtx->height,
PIX_FMT_YUV420P,
SWS_FAST_BILINEAR, NULL, NULL, NULL);
sws_scale(fooContext, picture->data, picture->linesize, 0, codecCtx->height, outpic->data, outpic->linesize);
//encode the image
out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
out_size += avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
//release pictures
av_free(outbuffer);
av_free(picture);
av_free(outpic);
return out_size;
}
And closing the encoder:
void Java_com_camera_simpledoublewebcams2_CameraPreview_closeencoder(JNIEnv* env,jobject thiz){
free(output_buffer);
avcodec_close(codecCtx);
av_free(codecCtx);
}
When I send the first image, I get a result from the encoder. When I try to send another image the program crashes.
I tried calling init once, then sending the images, then close; it didn't work.
I tried calling init and close for every image; that didn't work either.
Any suggestions?
Thanks!
EDIT: After further research I found that the problem is in the sws_scale call.
I still don't know what is causing this issue...
out_size = avcodec_encode_video(codecCtx, output_buffer,output_buffer_size, outpic);
out_size += avcodec_encode_video(codecCtx, output_buffer, output_buffer_size,outpic);
Why are you encoding twice?
Perhaps the error is due to that double encoding. Try removing the second encoding.
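For reference, a single encode per frame with the return value checked would look roughly like this (a sketch reusing the question's variables; the pts handling is an assumption, since the original code sets outpic->pts to 0 for every image):
outpic->pts = frame_index;  /* hypothetical frame counter, incremented per image */
out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
if (out_size < 0) {
    LOGI("avcodec_encode_video failed: %d", out_size);
} else {
    /* out_size bytes of encoded data are now in output_buffer */
}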
I have been working on a project about video summarization on the Android platform, and I am stuck on the encoding. I think:
first I must convert my frame into an RGB frame, then convert that RGB frame into a YUV frame, and then encode the frame. After these operations, the output video looks very weird. I think I missed something. Here is my latest optimized code; maybe someone has experience with this subject.
Its syntax has been adapted to Android NDK syntax:
jint Java_com_test_Test_encodeVideo(JNIEnv* env, jobject javaThis)
{
char *flname, *err, *info;
AVCodec *codec;
AVCodecContext *c= NULL;
int i,out_size, size, x, y,z, outbuf_size;
int frameCount=99;
FILE *f;
AVFrame *picture, *yuvFrame;
uint8_t *outbuf, *picture_buf;
PPMImage *img;
const char *destfilename = "/sdcard/new.mp4";
int numBytes;
uint8_t *buffer;
av_register_all();
// must be called before using avcodec lib
avcodec_init();
// register all the codecs
avcodec_register_all();
log_message("Video encoding\n");
// find the H263 video encoder
codec = avcodec_find_encoder(CODEC_ID_H263);
if (!codec) {
sprintf(err, "codec not found\n");
log_message(err);
}
c= avcodec_alloc_context();
picture= avcodec_alloc_frame();
yuvFrame= avcodec_alloc_frame();
// get first ppm context. it is because I need width and height values.
img = getPPM("/sdcard/frame1.ppm");
c->bit_rate = 400000;
// resolution must be a multiple of two
c->width = img->x;
c->height = img->y;
free(img);
// frames per second
c->time_base= (AVRational){1,25};
c->gop_size = 10; // emit one intra frame every ten frames
//c->max_b_frames=1;
c->pix_fmt = PIX_FMT_YUV420P;
// open it
if (avcodec_open(c, codec) < 0){
log_message("codec couldn't open");
return -1;
}
//destfilename = (*env)->GetStringUTFChars(env, dst, 0);
f = fopen(destfilename, "wb");
log_message(destfilename);
if (!f) {
sprintf(err, "could not open %s", destfilename);
log_message(err);
}
log_message("after destination file opening");
// alloc image and output buffer
outbuf_size = 100000;
outbuf = malloc(outbuf_size);
size = c->width * c->height;
picture_buf = malloc(size * 3); // size for RGB
picture->data[0] = picture_buf;
picture->data[1] = picture->data[0] + size;
picture->data[2] = picture->data[1] + size / 4;
picture->linesize[0] = c->width;
picture->linesize[1] = c->width / 2;
picture->linesize[2] = c->width / 2;
numBytes=avpicture_get_size(PIX_FMT_YUV420P, c->width,
c->height);
buffer=malloc(numBytes);
// Assign appropriate parts of buffer to image planes in FrameYUV
avpicture_fill((AVPicture *)yuvFrame, buffer, PIX_FMT_YUV420P,
c->width, c->height);
// encode the video
log_message("before for loop");
for(z=1;z<frameCount;z++) {
sprintf(flname,"/sdcard/frame%d.ppm",z);
// read the ppm file
img = getPPM(flname);
picture->data[0] = img->data;
// convert the rgb frame into yuv frame
rgb2yuv(picture,yuvFrame,c);
log_message("translation completed.");
// encode the image
out_size = avcodec_encode_video(c, outbuf, outbuf_size, yuvFrame);
sprintf(info,"encoding frame %3d (size=%5d)\n", z, out_size);
log_message(info);
fwrite(outbuf, 1, out_size, f);
free(img);
}
// get the delayed frames
for(; out_size; i++) {
//fflush(stdout);
out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
sprintf(info,"write frame %3d (size=%5d)\n", i, out_size);
log_message(info);
fwrite(outbuf, 1, out_size, f);
}
// add sequence end code to have a real mpeg file
outbuf[0] = 0x00;
outbuf[1] = 0x00;
outbuf[2] = 0x01;
outbuf[3] = 0xb7;
fwrite(outbuf, 1, 4, f);
fclose(f);
free(picture_buf);
free(outbuf);
avcodec_close(c);
av_free(c);
av_free(picture);
av_free(yuvFrame);
}
int rgb2yuv(AVFrame *frameRGB, AVFrame *frameYUV, AVCodecContext *c)
{
char *err;
static struct SwsContext *img_convert_ctx;
log_message("conversion starts");
// Convert the image into YUV format from RGB format
if(img_convert_ctx == NULL) {
int w = c->width;
int h = c->height;
img_convert_ctx = sws_getContext(w, h, PIX_FMT_RGB24,w, h, c->pix_fmt, SWS_BICUBIC,NULL, NULL, NULL);
if(img_convert_ctx == NULL) {
sprintf(err, "Cannot initialize the conversion context!\n");
log_message(err);
return -1;
}
}
int ret = sws_scale(img_convert_ctx,frameRGB->data, frameRGB->linesize , 0,c->height,frameYUV->data, frameYUV->linesize );
return ret;
}
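One note on the RGB→YUV step, in case it helps: with a packed PIX_FMT_RGB24 source, sws_scale expects data[0] to point at the interleaved pixels and linesize[0] to equal 3 * width, whereas the code above sets picture up with a planar-YUV-style layout (linesize[0] = c->width, with data[1] and data[2] pointing into the buffer), which may explain part of the distortion. A minimal sketch of wrapping an RGB24 buffer for sws_scale, using the same legacy API (wrap_rgb24 is a hypothetical helper, not part of the code above):
/* Sketch (legacy FFmpeg API as used above): wrap a packed RGB24 buffer,
 * 3 bytes per pixel, in an AVFrame before handing it to sws_scale. */
static AVFrame *wrap_rgb24(uint8_t *rgb, int width, int height)
{
    AVFrame *frame = avcodec_alloc_frame();
    if (!frame)
        return NULL;
    /* avpicture_fill sets frame->data[0] = rgb and frame->linesize[0] = 3 * width */
    avpicture_fill((AVPicture *)frame, rgb, PIX_FMT_RGB24, width, height);
    return frame;
}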