Affdex SDK Ubuntu Compilation issues - android

I am trying to compile the following wrapper code to use the Affectiva library on Ubuntu:
#include <iostream>
#include <memory>
#include <chrono>
#include <fstream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <boost/filesystem.hpp>
#include <boost/timer/timer.hpp>
#include <boost/program_options.hpp>
#include "VideoDetector.h"
#include "PhotoDetector.h"
#include "AffdexException.h"
#include "PlottingImageListener.hpp"
#include "StatusListener.hpp"
using namespace std;
using namespace affdex;
int main(int argsc, char** argsv) {
std::map<boost::filesystem::path, bool> VIDEO_EXTS = { {boost::filesystem::path(".avi"), 1},
{boost::filesystem::path(".mov"), 1},
{boost::filesystem::path(".flv"), 1},
{boost::filesystem::path(".webm"), 1},
{boost::filesystem::path(".wmv"), 1},
{boost::filesystem::path(".mp4"), 1} };
affdex::path DATA_FOLDER;
affdex::path videoPath;
int process_framerate = 30;
bool draw_display = true;
bool loop = false;
unsigned int nFaces = 1;
int faceDetectorMode = (int)FaceDetectorMode::LARGE_FACES;
const int precision = 2;
std::cerr.precision(precision);
std::cout.precision(precision);
namespace po = boost::program_options; // abbreviate namespace
po::options_description description("Project for demoing the Affdex SDK VideoDetector class (processing video files).");
description.add_options()
#ifdef _WIN32
("data,d", po::wvalue< affdex::path >(&DATA_FOLDER)->default_value(affdex::path(L"data"), std::string("data")), "Path to the data folder")
("input,i", po::wvalue< affdex::path >(&videoPath)->required(), "Video file to processs")
#else // _WIN32
("data,d", po::value< affdex::path >(&DATA_FOLDER)->default_value(affdex::path("data"), std::string("data")), "Path to the data folder")
("input,i", po::value< affdex::path >(&videoPath)->required(), "Video file to processs")
#endif // _WIN32
("pfps", po::value< int >(&process_framerate)->default_value(30), "Processing framerate.")
("faceMode", po::value< int >(&faceDetectorMode)->default_value((int)FaceDetectorMode::SMALL_FACES), "Face detector mode (large faces vs small faces).")
("numFaces", po::value< unsigned int >(&nFaces)->default_value(1), "Number of faces to be tracked.")
("loop", po::value< bool >(&loop)->default_value(false), "Loop over the video being processed.")
("help,h", "Display this help message.")
;
po::variables_map args;
try
{
po::store(po::command_line_parser(argsc, argsv).options(description).run(), args);
if (args["help"].as<bool>())
{
std::cout << description << std::endl;
return 0;
}
po::notify(args);
}
catch (po::error& e)
{
std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
std::cerr << "For help, use the -h option." << std::endl << std::endl;
return 1;
}
// Parse and check the data folder (with assets)
if (!boost::filesystem::exists(DATA_FOLDER))
{
std::cerr << "Data folder doesn't exist: " << std::string(DATA_FOLDER.begin(), DATA_FOLDER.end()) << std::endl;
std::cerr << "Try specifying the folder through the command line" << std::endl;
std::cerr << description << std::endl;
return 1;
}
try
{
std::shared_ptr<Detector> detector;
//Initialize out file
boost::filesystem::path csvPath(videoPath);
boost::filesystem::path fileExt = csvPath.extension();
csvPath.replace_extension(".csv");
std::ofstream csvFileStream(csvPath.c_str());
if (!csvFileStream.is_open())
{
std::cerr << "Unable to open csv file " << csvPath << std::endl;
return 1;
}
if (VIDEO_EXTS[fileExt]) // If it is a video file.
{
detector = std::make_shared<VideoDetector>(process_framerate, nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
}
else //Otherwise it's a photo
{
detector = std::make_shared<PhotoDetector>(nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
}
//VideoDetector videoDetector(process_framerate, nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
std::cout << "Max num of faces set to: " << detector->getMaxNumberFaces() << std::endl;
std::string mode;
switch (detector->getFaceDetectorMode())
{
case FaceDetectorMode::LARGE_FACES:
mode = "LARGE_FACES";
break;
case FaceDetectorMode::SMALL_FACES:
mode = "SMALL_FACES";
break;
default:
break;
}
std::cout << "Face detector mode set to: " << mode << std::endl;
shared_ptr<PlottingImageListener> listenPtr(new PlottingImageListener(csvFileStream, draw_display));
detector->setDetectAllEmotions(true);
detector->setDetectAllExpressions(true);
detector->setDetectAllEmojis(true);
detector->setDetectAllAppearances(true);
detector->setClassifierPath(DATA_FOLDER);
detector->setImageListener(listenPtr.get());
detector->start(); //Initialize the detectors .. call only once
do
{
shared_ptr<StatusListener> videoListenPtr = std::make_shared<StatusListener>();
detector->setProcessStatusListener(videoListenPtr.get());
if (VIDEO_EXTS[fileExt])
{
((VideoDetector *)detector.get())->process(videoPath); //Process a video
}
else
{
//videoPath is of type std::wstring on windows, but std::string on other platforms.
cv::Mat img = cv::imread(std::string(videoPath.begin(), videoPath.end()));
// Create a frame
Frame frame(img.size().width, img.size().height, img.data, Frame::COLOR_FORMAT::BGR);
((PhotoDetector *)detector.get())->process(frame); //Process an image
}
do
{
if (listenPtr->getDataSize() > 0)
{
std::pair<Frame, std::map<FaceId, Face> > dataPoint = listenPtr->getData();
Frame frame = dataPoint.first;
std::map<FaceId, Face> faces = dataPoint.second;
if (draw_display)
{
listenPtr->draw(faces, frame);
}
std::cerr << "timestamp: " << frame.getTimestamp()
<< " cfps: " << listenPtr->getCaptureFrameRate()
<< " pfps: " << listenPtr->getProcessingFrameRate()
<< " faces: "<< faces.size() << endl;
listenPtr->outputToFile(faces, frame.getTimestamp());
}
} while (VIDEO_EXTS[fileExt] && (videoListenPtr->isRunning() || listenPtr->getDataSize() > 0));
} while(loop);
detector->stop();
csvFileStream.close();
std::cout << "Output written to file: " << csvPath << std::endl;
}
catch (const AffdexException& ex)
{
std::cerr << ex.what();
}
return 0;
}
My command is:
g++-4.8 VidDetector.cpp -o VidDetector -std=c++11 -I/root/affdex-sdk/include -I/root/affdexUbuntu/include -I/root/sdk-samples/common -L/root/affdex-sdk/lib -l affdex-native -L/usr/lib/x86_64-linux-gnu -l boost_system -l boost_filesystem
The result is an issue with linking the boost library:
VidDetector.cpp:(.text+0x8d9): undefined reference to `boost::program_options::variables_map::variables_map()'
VidDetector.cpp:(.text+0x943): undefined reference to `boost::program_options::store(boost::program_options::basic_parsed_options<char> const&, boost::program_options::variables_map&, bool)'
VidDetector.cpp:(.text+0x9e5): undefined reference to `boost::program_options::operator<<(std::ostream&, boost::program_options::options_description const&)'
VidDetector.cpp:(.text+0xa0b): undefined reference to `boost::program_options::notify(boost::program_options::variables_map&)'
VidDetector.cpp:(.text+0xb1a): undefined reference to `boost::program_options::operator<<(std::ostream&, boost::program_options::options_description const&)'
/tmp/ccUPvUA2.o: In function `boost::program_options::error_with_option_name::~error_with_option_name()':
I am not sure how to resolve these issues. I have tried passing the Boost .so files to g++, but it still cannot find the referenced symbols. What am I missing in the command that will fix this issue?
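All of the undefined references are to symbols from Boost.Program_Options, which the link line never pulls in; unlike the header-only parts of Boost, program_options requires linking its library, so -lboost_program_options is needed. Assuming libboost_program_options.so is installed in /usr/lib/x86_64-linux-gnu alongside the other Boost libraries, a command along these lines should link:
g++-4.8 VidDetector.cpp -o VidDetector -std=c++11 -I/root/affdex-sdk/include -I/root/affdexUbuntu/include -I/root/sdk-samples/common -L/root/affdex-sdk/lib -laffdex-native -L/usr/lib/x86_64-linux-gnu -lboost_system -lboost_filesystem -lboost_program_options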

Related

Android NDK: How to find load relocation for a PIE binary / how to get _r_debug.r_map->l_addr

This is a sister question of "How to find load relocation for a PIE binary?", which was solved by using _r_debug.r_map->l_addr. Now I need to do this on Android NDK 19.
I need to print a backtrace without symbols. I can do this with the following code:
#include <iostream>
#include <iomanip>
#include <unwind.h>
#include <dlfcn.h>
#include <sstream>
#include <android/log.h>
#include <cstdint> // uintptr_t
namespace {
struct BacktraceState
{
void** current;
void** end;
};
static _Unwind_Reason_Code unwindCallback(struct _Unwind_Context* context, void* arg)
{
BacktraceState* state = static_cast<BacktraceState*>(arg);
uintptr_t pc = _Unwind_GetIP(context);
if (pc) {
if (state->current == state->end) {
return _URC_END_OF_STACK;
} else {
*state->current++ = reinterpret_cast<void*>(pc);
}
}
return _URC_NO_REASON;
}
}
size_t captureBacktrace(void** buffer, size_t max)
{
BacktraceState state = {buffer, buffer + max};
_Unwind_Backtrace(unwindCallback, &state);
return state.current - buffer;
}
namespace clog {
std::string bt(int from, int max_stack) {
const int MAX_STACK = 30;
if (from < 0 || from >= max_stack)
from = 0;
if (max_stack > MAX_STACK)
max_stack = MAX_STACK;
void* array[MAX_STACK];
std::ostringstream msg;
size_t size = captureBacktrace(array, max_stack);
if (size >= 2) {
msg << "[bt]" << std::hex;
for (size_t i = static_cast<size_t>(from); i < size; ++i) {
msg << "0x" << reinterpret_cast<uintptr_t>(array[i]) << " ";
}
msg << "[/bt]" << std::dec;
}
return msg.str();
}
}
It prints addresses that include the ASLR load offset. On Linux I fixed this by changing
msg << "0x" << reinterpret_cast<uintptr_t>(array[i]) << " ";
to
#include <execinfo.h>
#include <link.h>
...
static const uintptr_t relocation = _r_debug.r_map->l_addr;
msg << "0x" << reinterpret_cast<uintptr_t>(array[i]) - reinterpret_cast<uintptr_t>(relocation) << " ";
_r_debug.r_map->l_addr is not available on Android as far as I can see. Is there some equivalent?
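One approach that may serve as an equivalent (a sketch, assuming bionic's dladdr() from <dlfcn.h>, which Android provides): dladdr() fills Dl_info::dli_fbase with the base address of the loaded object containing a given address, which plays the same role as _r_debug.r_map->l_addr:
#include <dlfcn.h>
#include <sstream>
#include <string>
#include <cstdint>
// Convert absolute backtrace addresses to module-relative offsets.
// dli_fbase is the load base of whichever object contains the address,
// so this also handles frames that fall inside other shared libraries.
std::string formatRelative(void* const* array, size_t size) {
    std::ostringstream msg;
    msg << "[bt]" << std::hex;
    for (size_t i = 0; i < size; ++i) {
        uintptr_t pc = reinterpret_cast<uintptr_t>(array[i]);
        Dl_info info;
        if (dladdr(array[i], &info) && info.dli_fbase) {
            pc -= reinterpret_cast<uintptr_t>(info.dli_fbase);
        }
        msg << "0x" << pc << " ";
    }
    msg << "[/bt]" << std::dec;
    return msg.str();
}
Unlike the single _r_debug lookup, this computes the offset per frame, which is what tools like addr2line or ndk-stack expect for each library.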

Can GStreamer play media content sent using the client code below?

I want to write a client/server application to stream media content from a PC (Linux) to Android. I decided to write the client-side code using POSIX sockets. On the server side (on the Android device) I am thinking of using the GStreamer API. The client code is shown in the listing below.
My question is: can GStreamer play the media content sent using this client code?
#include <arpa/inet.h>
#include <iostream>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/socket.h>
#include <unistd.h>
int main(int argc, char** argv)
{
if(argc < 2) {
std::cout << "\nError: Missing parameter" << std::endl;
std::cout << "Usage:\n\tmain <file path>\n" << std::endl;
exit(0);
}
const char* file_name = argv[1];
FILE* file = fopen(file_name, "rb");
if (file == NULL)
{
std::cout << "Error: Cannot open the file at the given path" << std::endl;
return 1;
}
const size_t portion = 8192; // read/send chunk size
char buf[portion];
memset(buf, 0, portion);
int quantum = 0;
sockaddr_in serverAddr;
int sockfd = socket(AF_INET, SOCK_STREAM, 0);
if (sockfd < 0) {
std::cout << "Error: Cannot create the socket" << std::endl;
return 1;
}
serverAddr.sin_family = AF_INET;
serverAddr.sin_port = htons(2000);
serverAddr.sin_addr.s_addr = inet_addr("127.0.0.1");
if (connect(sockfd, (struct sockaddr*)&serverAddr, sizeof(serverAddr)) < 0) {
std::cout << "Error: Cannot connect to the server" << std::endl;
return 1;
}
int bytes_read = 0;
while( (bytes_read = fread(buf, 1, portion, file) ) > 0) {
int s = send(sockfd, buf, bytes_read, 0);
if (s == -1) {
std::cout << "Error: The data could not be sent" << std::endl;
}
std::cout << " . ";
}
std::cout << "Done" << std::endl;
return 0;
}
Among many other things, you can use tcpserversrc with host=127.0.0.1 and port=2000. This creates a source element that listens for a TCP connection on port 2000 of 127.0.0.1 and then passes the received data into a GStreamer pipeline.
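For example, an illustrative receiving pipeline could look like this (the exact elements depend on the container and codecs of the file being sent):
gst-launch-1.0 tcpserversrc host=127.0.0.1 port=2000 ! decodebin ! autovideosink
Note that tcpserversrc has to be listening before the client calls connect(), and containers whose index sits at the end of the file (e.g. MP4 with a trailing moov atom) do not stream well over a plain TCP pipe; a streamable format such as MPEG-TS or Matroska is a safer choice.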

Android Opensles resampling PCM with FFmpeg

I'm trying to play an audio stream with FFmpeg and OpenSL ES on Android. The problem seems to be in passing the decoded and resampled frames from FFmpeg to OpenSL ES, because the audio I hear sounds robotic and scratchy.
The decoded frames from ffmpeg:
PCM
48000 Hz
S16P (planar)
OpenSL ES needs, in this case:
PCM
48000 Hz
S16 (interleaved)
OpenSL ES setup:
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 255};
SLDataFormat_PCM format_pcm = { SL_DATAFORMAT_PCM, 2 , SL_SAMPLINGRATE_48, SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
SLDataSource audioSrc = {&loc_bufq, &format_pcm};
This is the pseudocode for resampling and enqueueing to OpenSL ES:
#define OPENSLES_BUFLEN 10
#define MAX_AUDIO_FRAME_SIZE 192000
DECLARE_ALIGNED(16,uint8_t,audio_buffer)[MAX_AUDIO_FRAME_SIZE * OPENSLES_BUFLEN];
int decode_audio(AVCodecContext * ctx, SwrContext *swr_context, AVPacket *packet, AVFrame * frame){
int got_frame_ptr;
int len = avcodec_decode_audio4(ctx, frame, &got_frame_ptr, packet);
if(!got_frame_ptr)
return -ERROR;
int original_data_size = av_samples_get_buffer_size(NULL, ctx->channels,
frame->nb_samples, ctx->sample_fmt, 1);
uint8_t *audio_buf;
int data_size;
if (swr_context != NULL) {
uint8_t *out[] = { audio_buffer };
int sample_per_buffer_divider = 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16); // channels * bytes per sample
int len2 = swr_convert(swr_context, out,
sizeof(audio_buffer) / sample_per_buffer_divider,
frame->extended_data, frame->nb_samples);
if (len2 < 0) {
return -ERROR;
}
if (len2 == sizeof(audio_buffer) / sample_per_buffer_divider) {
swr_init(swr_context);
}
audio_buf = audio_buffer;
data_size = len2 * sample_per_buffer_divider;
}
else {
audio_buf = frame->data[0];
data_size = original_data_size;
}
(*opengSLESData->bqPlayerBufferQueue)->Enqueue(opengSLESData->bqPlayerBufferQueue, audio_buf, data_size);
return data_size;
}
I would appreciate any help, thanks.
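One thing to double-check is the SwrContext configuration itself. Below is a minimal sketch (an assumption-based illustration, not code from the question) of a setup that converts the decoder's planar S16 output to the packed S16 layout OpenSL ES expects, keeping the 48 kHz rate and channel layout; ctx stands for the decoder's AVCodecContext:
extern "C" {
#include <libavcodec/avcodec.h>
#include <libswresample/swresample.h>
#include <libavutil/channel_layout.h>
}
// Sketch: planar S16 (AV_SAMPLE_FMT_S16P) in, interleaved S16 out,
// same sample rate and channel layout on both sides.
SwrContext* make_swr_for_opensles(const AVCodecContext* ctx) {
    SwrContext* swr = swr_alloc_set_opts(NULL,
        av_get_default_channel_layout(ctx->channels), // out: same layout
        AV_SAMPLE_FMT_S16,                            // out: packed S16 for OpenSL ES
        ctx->sample_rate,                             // out: keep 48000 Hz
        av_get_default_channel_layout(ctx->channels), // in: decoder layout
        ctx->sample_fmt,                              // in: AV_SAMPLE_FMT_S16P here
        ctx->sample_rate,                             // in: 48000 Hz
        0, NULL);
    if (swr == NULL || swr_init(swr) < 0)
        return NULL; // treat as a fatal setup error
    return swr;
}
Also verify that the out_count passed to swr_convert() really is the buffer capacity in samples per channel: the divider must equal channels * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16), and a mismatch there produces exactly the robotic, scratchy playback described.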
This example may help:
#include "stdafx.h"
#include <iostream>
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
//#include "swscale.h"
#include "libswresample/swresample.h"
};
FILE *fin, *fout;
int ffmpeg_audio_decode( const char * inFile, const char * outFile)
{
// Initialize FFmpeg
av_register_all();
AVFrame* frame = avcodec_alloc_frame();
if (!frame)
{
std::cout << "Error allocating the frame" << std::endl;
return 1;
}
// you can change the file name "01 Push Me to the Floor.wav" to whatever the file is you're reading, like "myFile.ogg" or
// "someFile.webm" and this should still work
AVFormatContext* formatContext = NULL;
//if (avformat_open_input(&formatContext, "01 Push Me to the Floor.wav", NULL, NULL) != 0)
if (avformat_open_input(&formatContext, inFile, NULL, NULL) != 0)
{
av_free(frame);
std::cout << "Error opening the file" << std::endl;
return 1;
}
if (avformat_find_stream_info(formatContext, NULL) < 0)
{
av_free(frame);
avformat_close_input(&formatContext);
std::cout << "Error finding the stream info" << std::endl;
return 1;
}
AVStream* audioStream = NULL;
// Find the audio stream (some container files can have multiple streams in them)
for (unsigned int i = 0; i < formatContext->nb_streams; ++i)
{
if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioStream = formatContext->streams[i];
break;
}
}
if (audioStream == NULL)
{
av_free(frame);
avformat_close_input(&formatContext);
std::cout << "Could not find any audio stream in the file" << std::endl;
return 1;
}
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = avcodec_find_decoder(codecContext->codec_id);
if (codecContext->codec == NULL)
{
av_free(frame);
avformat_close_input(&formatContext);
std::cout << "Couldn't find a proper decoder" << std::endl;
return 1;
}
else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
{
av_free(frame);
avformat_close_input(&formatContext);
std::cout << "Couldn't open the context with the decoder" << std::endl;
return 1;
}
std::cout << "This stream has " << codecContext->channels << " channels and a sample rate of " << codecContext->sample_rate << "Hz" << std::endl;
std::cout << "The data is in the format " << av_get_sample_fmt_name(codecContext->sample_fmt) << std::endl;
//codecContext->sample_fmt = AV_SAMPLE_FMT_S16;
int64_t outChannelLayout = AV_CH_LAYOUT_MONO; //AV_CH_LAYOUT_STEREO;
AVSampleFormat outSampleFormat = AV_SAMPLE_FMT_S16; // Packed audio, non-planar (this is the most common format, and probably what you want; also, WAV needs it)
int outSampleRate = 8000;//44100;
// Note that AVCodecContext::channel_layout may or may not be set by libavcodec. Because of this,
// we won't use it, and will instead try to guess the layout from the number of channels.
SwrContext* swrContext = swr_alloc_set_opts(NULL,
outChannelLayout,
outSampleFormat,
outSampleRate,
av_get_default_channel_layout(codecContext->channels),
codecContext->sample_fmt,
codecContext->sample_rate,
0,
NULL);
if (swrContext == NULL)
{
av_free(frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
std::cout << "Couldn't create the SwrContext" << std::endl;
return 1;
}
if (swr_init(swrContext) != 0)
{
av_free(frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
swr_free(&swrContext);
std::cout << "Couldn't initialize the SwrContext" << std::endl;
return 1;
}
fout = fopen(outFile, "wb+");
AVPacket packet;
av_init_packet(&packet);
// Read the packets in a loop
while (av_read_frame(formatContext, &packet) == 0)
{
if (packet.stream_index == audioStream->index)
{
AVPacket decodingPacket = packet;
while (decodingPacket.size > 0)
{
// Try to decode the packet into a frame
int frameFinished = 0;
int result = avcodec_decode_audio4(
codecContext,
frame,
&frameFinished,
&decodingPacket);
if (result < 0 || frameFinished == 0)
{
break;
}
unsigned char buffer[100000] = {0};
unsigned char* pointers[SWR_CH_MAX] = {nullptr};
pointers[0] = &buffer[0];
int numSamplesOut = swr_convert(
swrContext,
pointers,
outSampleRate,
(const unsigned char**)frame->extended_data,
frame->nb_samples);
fwrite(
(short *)buffer,
sizeof(short),
(size_t)numSamplesOut,
fout);
decodingPacket.size -= result;
decodingPacket.data += result;
}
}
// You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory
av_free_packet(&packet);
}
// Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag
// is set, there can be buffered up frames that need to be flushed, so we'll do that
if (codecContext->codec->capabilities & CODEC_CAP_DELAY)
{
av_init_packet(&packet);
// Decode all the remaining frames in the buffer, until the end is reached
int frameFinished = 0;
while (avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet) >= 0 && frameFinished)
{
}
}
// Clean up!
av_free(frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
fclose(fout);
return 0;
}
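For reference, a call might look like the following (file names are hypothetical). The output is raw, headerless PCM at the settings chosen above (8 kHz, mono, S16), so a WAV header would still have to be written separately to get a directly playable file:
ffmpeg_audio_decode("input.ogg", "decoded.pcm");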

"dlopen: Invalid argument" when loading native activity

I'm using the following bootstrapping code to load my native activity (jngl-test):
#include <android/native_activity.h>
#include <android/log.h>
#include <dlfcn.h>
#include <errno.h>
#include <stdexcept>
#include <string>
#include <cstring> // strerror
const std::string LIB_PATH = "/data/data/com.bixense.jngl_test/lib/";
void* load_lib(const std::string& l) {
void* handle = dlopen(l.c_str(), RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
throw std::runtime_error(std::string("dlopen(") + l + "): " + strerror(errno));
}
return handle;
}
void ANativeActivity_onCreate(ANativeActivity* app, void* ud, size_t udsize) {
try {
load_lib(LIB_PATH + "libogg.so");
load_lib(LIB_PATH + "libvorbis.so");
auto main = reinterpret_cast<void (*)(ANativeActivity*, void*, size_t)>(
dlsym(load_lib(LIB_PATH + "libjngl-test.so"), "ANativeActivity_onCreate")
);
if (!main) {
throw std::runtime_error("undefined symbol ANativeActivity_onCreate");
}
main(app, ud, udsize);
} catch(std::exception& e) {
__android_log_print(ANDROID_LOG_ERROR, "bootstrap", "%s", e.what());
ANativeActivity_finish(app);
}
}
I get the following error message:
dlopen(/data/data/com.bixense.jngl_test/lib/libjngl-test.so): Invalid argument
This doesn't tell me at all what's going wrong. Is there a way to get more debug output? What could "Invalid argument" mean?
I fixed it: dlerror() gives a far better error message.
Here's the bootstrap code if someone is interested:
#include <android/native_activity.h>
#include <android/log.h>
#include <dlfcn.h>
#include <stdexcept>
#include <string>
void* load_lib(const std::string& l) {
auto handle = dlopen(std::string("/data/data/com.bixense.jngl_test/lib/" + l).c_str(),
RTLD_NOW | RTLD_GLOBAL);
if (!handle) {
throw std::runtime_error(std::string("dlopen(") + l + "): " + dlerror());
}
return handle;
}
void ANativeActivity_onCreate(ANativeActivity* app, void* ud, size_t udsize) {
try {
load_lib("libogg.so");
load_lib("libvorbis.so");
auto main = reinterpret_cast<void (*)(ANativeActivity*, void*, size_t)>(
dlsym(load_lib("libjngl-test.so"), "ANativeActivity_onCreate")
);
if (!main) {
throw std::runtime_error("undefined symbol ANativeActivity_onCreate");
}
main(app, ud, udsize);
} catch(std::exception& e) {
__android_log_print(ANDROID_LOG_ERROR, "bootstrap", "%s", e.what());
ANativeActivity_finish(app);
}
}
You could also do this: put the library in the raw directory and load it from there.
For raw files, you should consider creating a raw folder inside the res directory and then calling
getResources().openRawResource(resourceName)
from your activity.
Then you can use it however you like.

Create video from images

Is there a way to create a video from a series of images on Android? Maybe there is a way to extend MediaRecorder so that it can take images as input.
I want to actually create the video and store it (as an MPEG-4 file, for instance).
Thanks for any suggestions.
I'm also trying to do the same thing. I have been advised to use Libav.
http://libav.org/
However, I need to build it with the NDK, and I currently have some issues doing so.
I'm looking for documentation about it. I'll keep you posted.
I've created a post about it: Libav build for Android
You can use an AnimationDrawable in an ImageView.
Add frames using the AnimationDrawable.addFrame(Drawable frame, int duration) method, and start the animation using AnimationDrawable.start().
Not sure if that's ideal, but it would work.
I use Android + NDK
AVFrame* OpenImage(const char* imageFileName)
{
AVFormatContext *pFormatCtx = avformat_alloc_context();
std::cout<<"1"<<imageFileName<<std::endl;
if( avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL) < 0)
{
printf("Can't open image file '%s'\n", imageFileName);
return NULL;
}
std::cout<<"2"<<std::endl;
av_dump_format(pFormatCtx, 0, imageFileName, false);
AVCodecContext *pCodecCtx;
std::cout<<"3"<<std::endl;
pCodecCtx = pFormatCtx->streams[0]->codec;
pCodecCtx->width = W_VIDEO;
pCodecCtx->height = H_VIDEO;
//pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
// Find the decoder for the video stream
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (!pCodec)
{
printf("Codec not found\n");
return NULL;
}
// Open codec
//if(avcodec_open2(pCodecCtx, pCodec)<0)
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)//check this NULL, it should be of AVDictionary **options
{
printf("Could not open codec\n");
return NULL;
}
std::cout<<"4"<<std::endl;
//
AVFrame *pFrame;
pFrame = av_frame_alloc();
if (!pFrame)
{
printf("Can't allocate memory for AVFrame\n");
return NULL;
}
printf("here");
int frameFinished;
int numBytes;
// Determine required buffer size and allocate buffer
numBytes = avpicture_get_size( pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrame, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
// Read frame
AVPacket packet;
int framesNumber = 0;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if(packet.stream_index != 0)
continue;
int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if (ret > 0)
{
printf("Frame is decoded, size %d\n", ret);
pFrame->quality = 4;
return pFrame;
}
else
printf("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
}
return NULL; // reached only if no frame was decoded
}
int combine_images_to_video(const char * infile_dir, const char * infile_prefix, const char* infile_surname, int total_frames,const char *outfile)
{
if (total_frames <= 0){
std::cout << "Usage: cv2ff <dir_name> <prefix> <image surname> <total frames> <outfile>" << std::endl;
std::cout << "Please check that the 4th argument is integer value of total frames"<<std::endl;
return 1;
}
printf("max %d frames\n",total_frames);
char *imageFileName;
char numberChar[NUMNUMBER];
// initialize FFmpeg library
av_register_all();
// av_log_set_level(AV_LOG_DEBUG);
int ret;
const int dst_width = W_VIDEO;
const int dst_height = H_VIDEO;
const AVRational dst_fps = {30, 1};//{fps,1}
// open output format context
AVFormatContext* outctx = nullptr;
ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);
//outctx->video_codec->
if (ret < 0) {
std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
return 2;
}
// open output IO context
ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
if (ret < 0) {
std::cerr << "fail to avio_open2: ret=" << ret;
return 2;
}
// create new video stream
AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
AVStream* vstrm = avformat_new_stream(outctx, vcodec);
if (!vstrm) {
std::cerr << "fail to avformat_new_stream";
return 2;
}
avcodec_get_context_defaults3(vstrm->codec, vcodec);
vstrm->codec->width = dst_width;
vstrm->codec->height = dst_height;
vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps;
if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// open video encoder
ret = avcodec_open2(vstrm->codec, vcodec, nullptr);
if (ret < 0) {
std::cerr << "fail to avcodec_open2: ret=" << ret;
return 2;
}
std::cout
<< "outfile: " << outfile << "\n"
<< "format: " << outctx->oformat->name << "\n"
<< "vcodec: " << vcodec->name << "\n"
<< "size: " << dst_width << 'x' << dst_height << "\n"
<< "fps: " << av_q2d(dst_fps) << "\n"
<< "pixfmt: " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n"
<< std::flush;
// initialize sample scaler
SwsContext* swsctx = sws_getCachedContext(
nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24,
dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
if (!swsctx) {
std::cerr << "fail to sws_getCachedContext";
return 2;
}
// allocate frame buffer for encoding
AVFrame* frame = av_frame_alloc();
std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height));
avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height);
frame->width = dst_width;
frame->height = dst_height;
frame->format = static_cast<int>(vstrm->codec->pix_fmt);
// encoding loop
avformat_write_header(outctx, nullptr);
int64_t frame_pts = 0;
unsigned nb_frames = 0;
bool end_of_stream = false;
int got_pkt = 0;
int i =0;
imageFileName = (char *)malloc(strlen(infile_dir)+strlen(infile_prefix)+NUMNUMBER+strlen(infile_surname)+1);
do{
if(!end_of_stream){
strcpy(imageFileName,infile_dir);
//strcat(imageFileName,"/");
strcat(imageFileName,infile_prefix);
sprintf(numberChar,"%03d",i+1);
strcat(imageFileName,numberChar);
//strcat(imageFileName,".");
strcat(imageFileName,infile_surname);
__android_log_print(1, "RecordingImage", "%s", imageFileName);
std::cout<<imageFileName<<std::endl;
AVFrame* frame_from_file = OpenImage(imageFileName);
if(!frame_from_file){
std::cout<<"error OpenImage"<<std::endl;
return 5;
}
sws_scale(swsctx, frame_from_file->data, frame_from_file->linesize, 0, frame_from_file->height, frame->data, frame->linesize);
frame->pts = frame_pts++;
av_frame_free(&frame_from_file);
}
// encode video frame
AVPacket pkt;
pkt.data = nullptr;
pkt.size = 0;
av_init_packet(&pkt);
ret = avcodec_encode_video2(vstrm->codec, &pkt, end_of_stream ? nullptr : frame, &got_pkt);
if (ret < 0) {
std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n";
return 2;
}
// rescale packet timestamp
pkt.duration = 1;
av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base);
// write packet
av_write_frame(outctx, &pkt);
std::cout << nb_frames << '\r' << std::flush; // dump progress
++nb_frames;
av_free_packet(&pkt);
i++;
if(i==total_frames-1)
end_of_stream = true;
} while (i<total_frames);
av_write_trailer(outctx);
std::cout << nb_frames << " frames encoded" << std::endl;
av_frame_free(&frame);
avcodec_close(vstrm->codec);
avio_close(outctx->pb);
avformat_free_context(outctx);
free(imageFileName);
return 0;
}
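A call could look like the following (the paths, prefix, and frame count are hypothetical, and W_VIDEO, H_VIDEO, and NUMNUMBER are assumed to be defined elsewhere in the project). Note that the directory string needs its trailing slash, since the function concatenates the name parts directly:
combine_images_to_video("/sdcard/frames/", "img", ".jpg", 300, "/sdcard/out.mp4");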
We can create a video from images using ffmpeg.
Check out my post about using ffmpeg on Android.
Use the command below to create a video from images placed in the same folder:
String command[]={"-y", "-r","1/5" ,"-i",src.getAbsolutePath(),
"-c:v","libx264","-vf", "fps=25","-pix_fmt","yuv420p", dest.getAbsolutePath()};
Here, src.getAbsolutePath() is the absolute path pattern of your input images.
For example, if all your images are stored in an Images folder inside the Pictures directory, with names extract_picture001.jpg, extract_picture002.jpg, extract_picture003.jpg, and so on, then:
String filePrefix = "extract_picture";
String fileExtn = ".jpg";
File picDir = Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_PICTURES);
File dir = new File(picDir, "Images");
File src = new File(dir, filePrefix + "%03d" + fileExtn);
To create a video from images placed in different folders, you have to create a text file, add the image paths to it, and then specify the path of that text file as the input.
Example:
Text File
file '/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg'
duration 2
file '/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg'
duration 5
file '/storage/emulated/0/DCIM/Camera/P_20170305_142939.jpg'
duration 6
file '/storage/emulated/0/DCIM/Pic/P_20170305_142818.jpg'
duration 2
Command
String command[] = {"-y", "-f", "concat", "-safe", "0", "-i", textFile.getAbsolutePath(), "-vsync", "vfr", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
where textFile.getAbsolutePath() is the absolute path of your text file.
Check out this ffmpeg doc for more info.
