FFMPEG sws_scale Crash on Android

I have an app that converts images to video. In Google Play I see the following crash report; the only detail I get is the name of the function, and I don't understand the rest:
backtrace:
#00 pc 0000cc78 /data/app-lib/com.myapp-1/libswscale.so (sws_scale+204)
#01 pc 000012af /data/app-lib/com.myapp-1/libffmpeg.so (OpenImage+322)
code around pc:
79065c58 e58d8068 e58d2070 e58d3074 059d00b0
The backtrace points to the function sws_scale. The code works almost all the time on my device (Nexus 5), but I see a lot of reports of this crash, even from the same device model. Any idea why this could happen?
AVFrame* OpenImage(const char* imageFileName, int W_VIDEO, int H_VIDEO, int* numBytes)
{
    AVFormatContext *pFormatCtx;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    int frameFinished;
    uint8_t *buffer;
    AVPacket packet;
    int srcBytes;
    AVFrame* frame2 = NULL; // scaled frame
    uint8_t* frame2_buffer;
    struct SwsContext *resize;

    if (av_open_input_file(&pFormatCtx, imageFileName, NULL, 0, NULL) != 0)
    {
        LOGI("Can't open image file '%s'\n", imageFileName);
        return NULL;
    }
    //dump_format(pFormatCtx, 0, imageFileName, 0);

    if (av_find_stream_info(pFormatCtx) < 0)
    {
        LOGI("Can't find stream info.");
        return NULL;
    }

    pCodecCtx = pFormatCtx->streams[0]->codec;
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;

    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        LOGI("Codec not found\n");
        return NULL;
    }

    // Open codec
    if (avcodec_open(pCodecCtx, pCodec) < 0)
    {
        LOGI("Could not open codec\n");
        return NULL;
    }

    pFrame = avcodec_alloc_frame();
    if (!pFrame)
    {
        LOGI("Can't allocate memory for AVFrame\n");
        return NULL;
    }

    // Determine required buffer size and allocate buffer
    srcBytes = avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
    buffer = (uint8_t *) av_malloc(srcBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrame, buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

    // Read frame
    if (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        int ret;
        // if (packet.stream_index != 0)
        //     continue;
        ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (ret > 0)
        {
            //LOGI("Frame is decoded, size %d\n", ret);
            pFrame->quality = 4;

            // Create another frame for resized result
            frame2 = avcodec_alloc_frame();
            *numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
            frame2_buffer = (uint8_t *) av_malloc(*numBytes * sizeof(uint8_t));
            avpicture_fill((AVPicture*) frame2, frame2_buffer, PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);

            // Get resize context
            resize = sws_getContext(pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, W_VIDEO, H_VIDEO, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

            // frame2 should be filled with resized samples
            ret = sws_scale(resize, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, frame2->data, frame2->linesize);
            sws_freeContext(resize);
        }
        else
            LOGI("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
    }

    av_free(pFrame);
    av_free_packet(&packet);
    avcodec_close(pCodecCtx);
    //av_free(pCodecCtx);
    av_close_input_file(pFormatCtx);
    return frame2;
}

After your avcodec_decode_video2 call, do not check ret alone; you need to check frameFinished too. If frameFinished == 0, your frame should not be used, because it has not been filled. I do not know how often this happens for images, but when you decode video it happens very often: you need to read the next packet and pass it to the next call of avcodec_decode_video2.
On a side note: why are you forcing pCodecCtx->pix_fmt = PIX_FMT_YUV420P? It is automatically set to the correct format by av_find_stream_info, and that is the value you should pass to sws_getContext as the source format.
Last thing: there is no need to fill your pFrame with avpicture_fill. You only need to av_frame_alloc() it, and avcodec_decode_video2 will take care of filling it.
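For illustration, here is a minimal sketch of what the decode/scale part of OpenImage could look like with those changes. It reuses the variables and the legacy API from the question; it is a sketch under those assumptions, not a drop-in fix:
// Sketch only: keep reading packets until frameFinished is set, and scale
// from the decoder's real pixel format instead of a forced one.
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    if (packet.stream_index != 0) {
        av_free_packet(&packet);
        continue;
    }
    int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
    av_free_packet(&packet);
    if (ret < 0)
        break;                       // decoding error
    if (!frameFinished)
        continue;                    // frame not filled yet, feed the next packet

    frame2 = avcodec_alloc_frame();
    *numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
    frame2_buffer = (uint8_t *) av_malloc(*numBytes);
    avpicture_fill((AVPicture*) frame2, frame2_buffer, PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);

    // Use pCodecCtx->pix_fmt (set by av_find_stream_info) as the source format.
    resize = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                            W_VIDEO, H_VIDEO, PIX_FMT_YUV420P,
                            SWS_BICUBIC, NULL, NULL, NULL);
    if (resize) {
        sws_scale(resize, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                  0, pCodecCtx->height, frame2->data, frame2->linesize);
        sws_freeContext(resize);
    }
    break;                           // a single decoded frame is enough for one image
}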

After this line
*numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
you have not checked the return value. The documentation for avpicture_get_size says:
"Returns: the computed picture buffer size or a negative error code in case of error"
It would be better to check *numBytes (and likewise srcBytes, buffer and frame2_buffer) before using them.
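A minimal sketch of those checks, keeping the question's variables (cleanup of the earlier allocations is omitted for brevity):
// Sketch only: bail out instead of scaling with an invalid size or a NULL buffer.
*numBytes = avpicture_get_size(PIX_FMT_YUV420P, W_VIDEO, H_VIDEO);
if (*numBytes < 0) {
    LOGI("avpicture_get_size failed: %d\n", *numBytes);
    return NULL;
}
frame2_buffer = (uint8_t *) av_malloc(*numBytes);
if (!frame2_buffer) {
    LOGI("av_malloc(%d) failed\n", *numBytes);
    return NULL;
}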

Related

How to achieve better quality with libav on android surface?

I am playing video from an RTSP stream on an Android surface using the solution from this repo:
https://github.com/alexandruc/android-ffmpeg-player/blob/master/android-ffmpeg-player/jni/ffmpeg-player.c
The video plays, but it has a lot of glitches, especially when something is moving.
I don't have much experience with libav. I would be happy if someone could help or point me to tutorials or a community.
Here is the function that displays video on the surface.
void* decodeAndRender(void *voidArgs) {
    auto *args = (decode_args*)voidArgs;
    CamCon* cc = getCamCon(args->name);
    ANativeWindow_Buffer windowBuffer;
    AVPacket packet;
    int i = 0;
    int frameFinished;
    int lineCnt;
    int counter = 0;

    while (av_read_frame(cc->formatCtx, &packet) >= 0 && cc->isConnect) {
        counter = 1;
        // Is this a packet from the video stream?
        if (packet.stream_index == cc->videoStreamIdx) {
            // Decode video frame
            avcodec_decode_video2(cc->codecCtx, cc->decodedFrame, &frameFinished, &packet);
            // Did we get a video frame?
            if (frameFinished) {
                // RECORD video
                if (cc->isRecord)
                    recordMP4(packet, cc);

                // DISPLAY video
                // Convert the image from its native format to RGBA
                sws_scale(
                    cc->sws_ctx,
                    (uint8_t const * const *)cc->decodedFrame->data,
                    cc->decodedFrame->linesize,
                    0,
                    cc->codecCtx->height,
                    cc->frameRGBA->data,
                    cc->frameRGBA->linesize
                );

                // lock the window buffer
                if (ANativeWindow_lock(cc->window, &windowBuffer, NULL) < 0) {
                    LOGE("cannot lock window");
                } else {
                    // draw the frame on buffer
                    LOGI("copy buffer %d:%d:%d", cc->displayWidth, cc->displayHeight, cc->displayWidth * cc->displayHeight * 4);
                    LOGI("window buffer: %d:%d:%d", windowBuffer.width,
                         windowBuffer.height, windowBuffer.stride);
                    memcpy(windowBuffer.bits, cc->buffer, cc->displayWidth * cc->displayHeight * 4);
                    // unlock the window buffer and post it to display
                    ANativeWindow_unlockAndPost(cc->window);
                    // count number of frames
                    ++i;
                }
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }
    LOGI("total No. of frames decoded and rendered %d", i);
    finish(args->env, args->name);
}
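One detail worth checking in the copy step above: the memcpy assumes every row of the window buffer is exactly displayWidth pixels wide, but ANativeWindow_Buffer.stride is often larger than the visible width. A hedged sketch of a stride-aware copy, assuming the window format is RGBA_8888 (4 bytes per pixel) and that cc->buffer holds displayWidth x displayHeight RGBA pixels as in the code above:
// Sketch only: copy row by row so each destination row starts at the
// window buffer's stride, not at displayWidth. Assumes RGBA_8888 output.
uint8_t *dst = (uint8_t *) windowBuffer.bits;
uint8_t *src = (uint8_t *) cc->buffer;
int rowBytes = cc->displayWidth * 4;                 // bytes of real pixel data per row
for (int y = 0; y < cc->displayHeight; ++y) {
    memcpy(dst + y * windowBuffer.stride * 4,        // stride is counted in pixels
           src + y * rowBytes,
           rowBytes);
}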

Streaming client over TCP and RTSP through Wi-Fi or LAN in Android

I am struggling to develop a streaming client for DVR cameras. I got it working with VLC Media Player over the RTSP protocol (using standard Wi-Fi routers such as Netgear), but the same code does not work with other Wi-Fi modems. Now I am working with the FFmpeg framework to implement the streaming client on Android using the JNI API, but I have no clear idea of how to implement it.
The network camera works with the IP Cam Viewer app.
The code is below:
/*****************************************************/
/* functional call */
/*****************************************************/
jboolean Java_FFmpeg_allocateBuffer( JNIEnv* env, jobject thiz )
{
    // Allocate an AVFrame structure
    pFrameRGB = avcodec_alloc_frame();
    if (pFrameRGB == NULL)
        return 0;

    sprintf(debugMsg, "%d %d", screenWidth, screenHeight);
    INFO(debugMsg);

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(dstFmt, screenWidth, screenHeight);
    /*
    numBytes = avpicture_get_size(dstFmt, pCodecCtx->width,
                                  pCodecCtx->height);
    */
    buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, dstFmt, screenWidth, screenHeight);

    return 1;
}

/* for each decoded frame */
jbyteArray Java_FFmpeg_getNextDecodedFrame( JNIEnv* env, jobject thiz )
{
    av_free_packet(&packet);

    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, screenWidth, screenHeight, dstFmt, SWS_BICUBIC, NULL, NULL, NULL);
                /*
                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, dstFmt, SWS_BICUBIC, NULL, NULL, NULL);
                */
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                          0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

                ++frameCount;

                /* uint8_t == unsigned 8 bits == jboolean */
                jbyteArray nativePixels = (*env)->NewByteArray(env, numBytes);
                (*env)->SetByteArrayRegion(env, nativePixels, 0, numBytes, buffer);
                return nativePixels;
            }
        }
        av_free_packet(&packet);
    }
    return NULL;
}
/*****************************************************/
/* / functional call */
/*****************************************************/
jstring
Java_FFmpeg_play( JNIEnv* env, jobject thiz, jstring jfilePath )
{
    INFO("--- Play");
    char* filePath = (char *)(*env)->GetStringUTFChars(env, jfilePath, NULL);
    RE(filePath);

    /*****************************************************/
    AVFormatContext *pFormatCtx;
    int i, videoStream;
    AVCodecContext *pCodecCtx;
    AVCodec *pCodec;
    AVFrame *pFrame;
    AVPacket packet;
    int frameFinished;
    float aspect_ratio;
    struct SwsContext *img_convert_ctx;

    INFO(filePath);

    /* FFmpeg */
    av_register_all();

    if (av_open_input_file(&pFormatCtx, filePath, NULL, 0, NULL) != 0)
        RE("failed av_open_input_file ");

    if (av_find_stream_info(pFormatCtx) < 0)
        RE("failed av_find_stream_info");

    videoStream = -1;
    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        RE("failed videostream == -1");

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        RE("Unsupported codec!");
    }

    if (avcodec_open(pCodecCtx, pCodec) < 0)
        RE("failed codec_open");

    pFrame = avcodec_alloc_frame();
    /* /FFmpeg */

    INFO("codec name:");
    INFO(pCodec->name);
    INFO("Getting into stream decode:");

    /* video stream */
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
            if (frameFinished) {
                ++i;
                INFO("frame finished");
                AVPicture pict;
                /*
                img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
                    pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
                    PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize,
                    0, pCodecCtx->height, pict.data, pict.linesize);
                */
            }
        }
        av_free_packet(&packet);
    }
    /* /video stream */

    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);

    RE("end of main");
}
I am not able to get the frames from the network camera.
Could someone give me an idea of how to implement a live-stream client for a DVR camera on Android?

Where is the frame decoded by ffmpeg stored?

I am trying to decode video and convert each frame to rgb32 or rgb565le format.
Then I pass this frame from C to an Android buffer via JNI.
So far, I know how to pass a buffer from C to Android, as well as how to decode video and get the decoded frame.
My question is how to convert the decoded frame to rgb32 (or rgb565le), and where is it stored?
The following is my code; I'm not sure whether it is correct.
-Jargo
img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 100, 100, PIX_FMT_RGB32, SWS_BICUBIC, NULL, NULL, NULL);
if (!img_convert_ctx)
    return -6;

while (av_read_frame(pFormatCtx, &packet) >= 0) {
    // Is this a packet from the video stream?
    if (packet.stream_index == videoStream) {
        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        // Did we get a video frame?
        if (frameFinished) {
            AVPicture pict;
            if (avpicture_alloc(&pict, PIX_FMT_RGB32, 100, 100) >= 0) {
                sws_scale(img_convert_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pict.data, pict.linesize);
            }
        } // End of if (frameFinished)
    } // End of if (packet.stream_index == videoStream)
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);
}
The converted RGB frame goes into pict (pFrame holds the raw decoded frame).
100x100 is probably no good; you have to calculate the pict size based on the pFrame size. I guess it should be pFrame->width * pFrame->height * 4 bytes (32 bits per pixel).
You have to allocate pict yourself.
See this tutorial: http://dranger.com/ffmpeg/
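For illustration, a minimal sketch of that advice, assuming the destination should have the same dimensions as the decoded frame (pCodecCtx->width and pCodecCtx->height) and keeping the legacy API from the question; rgb_ctx is a hypothetical name:
// Sketch only: size the RGB32 destination from the decoder's dimensions
// instead of hard-coding 100x100, and free it when done.
int dstW = pCodecCtx->width;
int dstH = pCodecCtx->height;

struct SwsContext *rgb_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                            dstW, dstH, PIX_FMT_RGB32,
                                            SWS_BICUBIC, NULL, NULL, NULL);

AVPicture pict;
if (rgb_ctx && avpicture_alloc(&pict, PIX_FMT_RGB32, dstW, dstH) >= 0) {
    // The converted RGB32 pixels end up in pict.data[0]:
    // dstW * dstH * 4 bytes, with pict.linesize[0] bytes per row.
    sws_scale(rgb_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize,
              0, pCodecCtx->height, pict.data, pict.linesize);
    // ... pass pict.data[0] up to Java here ...
    avpicture_free(&pict);
}
if (rgb_ctx)
    sws_freeContext(rgb_ctx);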

Encode image to video using ffmpeg (sws_scale)

I'm trying to encode an image to video using the ffmpeg library.
I have these global params:
//Global params
AVCodec *codec;
AVCodecContext *codecCtx;
uint8_t *output_buffer;
int output_buffer_size;
I divided the encoding into three methods:
Initializing the encoder:
jint Java_com_camera_simpledoublewebcams2_CameraPreview_initencoder(JNIEnv* env, jobject thiz){
    avcodec_register_all();
    avcodec_init();
    av_register_all();

    int fps = 30;

    /* find the H263 video encoder */
    codec = avcodec_find_encoder(CODEC_ID_H263);
    if (!codec) {
        LOGI("avcodec_find_encoder() run fail.");
        return -5;
    }

    //allocate context
    codecCtx = avcodec_alloc_context();

    /* put sample parameters */
    codecCtx->bit_rate = 400000;
    /* resolution must be a multiple of two */
    codecCtx->width = 176;
    codecCtx->height = 144;
    /* frames per second */
    codecCtx->time_base = (AVRational){1,fps};
    codecCtx->pix_fmt = PIX_FMT_YUV420P;
    codecCtx->codec_id = CODEC_ID_H263;
    codecCtx->codec_type = AVMEDIA_TYPE_VIDEO;

    /* open it */
    if (avcodec_open(codecCtx, codec) < 0) {
        LOGI("avcodec_open() run fail.");
        return -10;
    }

    //init buffer
    output_buffer_size = 500000;
    output_buffer = malloc(output_buffer_size);

    return 0;
}
Encoding the image:
jint Java_com_camera_simpledoublewebcams2_CameraPreview_encodejpeg(JNIEnv* env, jobject thiz, jchar* cImage, jint imageSize){
    int out_size;
    AVFrame *picture;
    AVFrame *outpic;
    uint8_t *outbuffer;

    //allocate frames
    picture = avcodec_alloc_frame();
    outpic = avcodec_alloc_frame();

    int nbytes = avpicture_get_size(PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);
    outbuffer = (uint8_t*)av_malloc(nbytes);

    outpic->pts = 0;

    //fill picture with image
    avpicture_fill((AVPicture*)picture, (uint8_t*)cImage, PIX_FMT_RGBA, codecCtx->width, codecCtx->height);
    //fill outpic with empty image
    avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, codecCtx->width, codecCtx->height);

    //rescale the image
    struct SwsContext* fooContext = sws_getContext(codecCtx->width, codecCtx->height,
                                                   PIX_FMT_RGBA,
                                                   codecCtx->width, codecCtx->height,
                                                   PIX_FMT_YUV420P,
                                                   SWS_FAST_BILINEAR, NULL, NULL, NULL);
    sws_scale(fooContext, picture->data, picture->linesize, 0, codecCtx->height, outpic->data, outpic->linesize);

    //encode the image
    out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
    out_size += avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);

    //release pictures
    av_free(outbuffer);
    av_free(picture);
    av_free(outpic);

    return out_size;
}
And closing the encoder:
void Java_com_camera_simpledoublewebcams2_CameraPreview_closeencoder(JNIEnv* env, jobject thiz){
    free(output_buffer);
    avcodec_close(codecCtx);
    av_free(codecCtx);
}
When I send the first image, I get a result from the encoder. When I try to send another image, the program crashes.
I tried calling init once, then sending the images, then close - it didn't work.
I tried calling init and close for every image - it didn't work either.
Any suggestions?
Thanks!
EDIT: After further research I found that the problem is in the sws_scale method.
I still don't know what is causing this issue...
out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
out_size += avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
Why are you encoding twice?
Perhaps the error is due to that double encoding. Try removing the second encoding.
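A minimal sketch of the encode step with the second call removed, assuming the same legacy avcodec_encode_video API and the globals used above:
// Sketch only: encode the converted frame once per input image.
out_size = avcodec_encode_video(codecCtx, output_buffer, output_buffer_size, outpic);
if (out_size < 0) {
    LOGI("avcodec_encode_video failed: %d", out_size);
} else {
    // out_size bytes of encoded H.263 data are now in output_buffer.
}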

FFmpeg encoding in c

I have been working on a project about video summarization on the Android platform, and I am stuck on encoding. I think I must first convert my frame into an RGB frame, then convert that RGB frame into a YUV frame, and then encode it. After these operations the output video was very weird, so I think I missed something. Here is my latest optimized code. Maybe someone has experience with this subject.
Its syntax has been adapted to the Android NDK:
jint Java_com_test_Test_encodeVideo(JNIEnv* env, jobject javaThis)
{
    char *flname, *err, *info;
    AVCodec *codec;
    AVCodecContext *c = NULL;
    int i, out_size, size, x, y, z, outbuf_size;
    int frameCount = 99;
    FILE *f;
    AVFrame *picture, *yuvFrame;
    uint8_t *outbuf, *picture_buf;
    PPMImage *img;
    const char *destfilename = "/sdcard/new.mp4";
    int numBytes;
    uint8_t *buffer;

    av_register_all();
    // must be called before using avcodec lib
    avcodec_init();
    // register all the codecs
    avcodec_register_all();

    log_message("Video encoding\n");

    // find the H263 video encoder
    codec = avcodec_find_encoder(CODEC_ID_H263);
    if (!codec) {
        sprintf(err, "codec not found\n");
        log_message(err);
    }

    c = avcodec_alloc_context();
    picture = avcodec_alloc_frame();
    yuvFrame = avcodec_alloc_frame();

    // get first ppm context. it is because I need width and height values.
    img = getPPM("/sdcard/frame1.ppm");

    c->bit_rate = 400000;
    // resolution must be a multiple of two
    c->width = img->x;
    c->height = img->y;
    free(img);

    // frames per second
    c->time_base = (AVRational){1,25};
    c->gop_size = 10; // emit one intra frame every ten frames
    //c->max_b_frames=1;
    c->pix_fmt = PIX_FMT_YUV420P;

    // open it
    if (avcodec_open(c, codec) < 0){
        log_message("codec couldn't open");
        return -1;
    }

    //destfilename = (*env)->GetStringUTFChars(env, dst, 0);
    f = fopen(destfilename, "wb");
    log_message(destfilename);
    if (!f) {
        sprintf(err, "could not open %s", destfilename);
        log_message(err);
    }
    log_message("after destination file opening");

    // alloc image and output buffer
    outbuf_size = 100000;
    outbuf = malloc(outbuf_size);

    size = c->width * c->height;
    picture_buf = malloc(size * 3); // size for RGB

    picture->data[0] = picture_buf;
    picture->data[1] = picture->data[0] + size;
    picture->data[2] = picture->data[1] + size / 4;
    picture->linesize[0] = c->width;
    picture->linesize[1] = c->width / 2;
    picture->linesize[2] = c->width / 2;

    numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
    buffer = malloc(numBytes);

    // Assign appropriate parts of buffer to image planes in FrameYUV
    avpicture_fill((AVPicture *)yuvFrame, buffer, PIX_FMT_YUV420P,
                   c->width, c->height);

    // encode the video
    log_message("before for loop");
    for (z = 1; z < frameCount; z++) {
        sprintf(flname, "/sdcard/frame%d.ppm", z);
        // read the ppm file
        img = getPPM(flname);
        picture->data[0] = img->data;
        // convert the rgb frame into yuv frame
        rgb2yuv(picture, yuvFrame, c);
        log_message("translation completed.");
        // encode the image
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, yuvFrame);
        sprintf(info, "encoding frame %3d (size=%5d)\n", z, out_size);
        log_message(info);
        fwrite(outbuf, 1, out_size, f);
        free(img);
    }

    // get the delayed frames
    for (; out_size; i++) {
        //fflush(stdout);
        out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        sprintf(info, "write frame %3d (size=%5d)\n", i, out_size);
        log_message(info);
        fwrite(outbuf, 1, out_size, f);
    }

    // add sequence end code to have a real mpeg file
    outbuf[0] = 0x00;
    outbuf[1] = 0x00;
    outbuf[2] = 0x01;
    outbuf[3] = 0xb7;
    fwrite(outbuf, 1, 4, f);
    fclose(f);
    free(picture_buf);
    free(outbuf);

    avcodec_close(c);
    av_free(c);
    av_free(picture);
    av_free(yuvFrame);
}

int rgb2yuv(AVFrame *frameRGB, AVFrame *frameYUV, AVCodecContext *c)
{
    char *err;
    static struct SwsContext *img_convert_ctx;

    log_message("conversion starts");
    // Convert the image into YUV format from RGB format
    if (img_convert_ctx == NULL) {
        int w = c->width;
        int h = c->height;
        img_convert_ctx = sws_getContext(w, h, PIX_FMT_RGB24, w, h, c->pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);
        if (img_convert_ctx == NULL) {
            sprintf(err, "Cannot initialize the conversion context!\n");
            log_message(err);
            return -1;
        }
    }

    int ret = sws_scale(img_convert_ctx, frameRGB->data, frameRGB->linesize, 0, c->height, frameYUV->data, frameYUV->linesize);
    return;
}
