Android OpenSL ES: resampling PCM with FFmpeg

I'm trying to play an audio stream with FFmpeg and OpenSL ES on Android. The problem seems to be in passing the decoded and resampled frames from FFmpeg to OpenSL ES: the audio I hear sounds robotic, with scratching.
The decoded frames from FFmpeg:
PCM
48000 Hz
S16P (planar)
OpenSL ES needs, in this case:
PCM
48000 Hz
S16 (interleaved)
OpenSL ES setup:
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = {SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 255};
SLDataFormat_PCM format_pcm = {SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_48,
                               SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
                               SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN};
SLDataSource audioSrc = {&loc_bufq, &format_pcm};
This is the pseudocode for resampling and enqueueing to OpenSL ES:
#define OPENSLES_BUFLEN 10
#define MAX_AUDIO_FRAME_SIZE 192000

DECLARE_ALIGNED(16, uint8_t, audio_buffer)[MAX_AUDIO_FRAME_SIZE * OPENSLES_BUFLEN];

int decode_audio(AVCodecContext *ctx, SwrContext *swr_context, AVPacket *packet, AVFrame *frame)
{
    int got_frame_ptr;
    int len = avcodec_decode_audio4(ctx, frame, &got_frame_ptr, packet);
    if (!got_frame_ptr)
        return -ERROR;
    int original_data_size = av_samples_get_buffer_size(NULL, ctx->channels,
                                                        frame->nb_samples, ctx->sample_fmt, 1);
    uint8_t *audio_buf;
    int data_size;
    if (swr_context != NULL) {
        uint8_t *out[] = { audio_buffer };
        // Bytes per interleaved S16 stereo sample frame (2 channels * 2 bytes).
        int sample_per_buffer_divider = 2 * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);
        int len2 = swr_convert(swr_context, out,
                               sizeof(audio_buffer) / sample_per_buffer_divider,
                               (const uint8_t **)frame->extended_data, frame->nb_samples);
        if (len2 < 0) {
            return -ERROR;
        }
        if (len2 == sizeof(audio_buffer) / sample_per_buffer_divider) {
            swr_init(swr_context);
        }
        audio_buf = audio_buffer;
        data_size = len2 * sample_per_buffer_divider;
    } else {
        audio_buf = frame->data[0];
        data_size = original_data_size;
    }
    // Note: every call enqueues the same static audio_buffer. OpenSL ES keeps a
    // pointer to the enqueued memory, so overwriting it with the next frame while
    // the queue is still playing it is a likely cause of robotic/scratchy sound;
    // rotating through a pool of buffers avoids this.
    (*opengSLESData->bqPlayerBufferQueue)->Enqueue(opengSLESData->bqPlayerBufferQueue,
                                                   audio_buf, data_size);
}
I would appreciate any help, thanks.
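For reference, a minimal sketch of the SwrContext setup that the conversion above implies (planar S16P in, interleaved S16 out, both 48 kHz stereo); the variable name is illustrative, not from the original code:

// Hypothetical setup matching the formats above: 48 kHz stereo S16P -> S16.
SwrContext *swr_context = swr_alloc_set_opts(NULL,
        AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16,  48000,   // output: interleaved
        AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16P, 48000,   // input: planar
        0, NULL);
if (swr_context == NULL || swr_init(swr_context) < 0) {
    // handle error
}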

This example may help:
#include "stdafx.h"
#include <iostream>
extern "C"
{
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
//#include "swscale.h"
#include "libswresample/swresample.h"
};
FILE *fin, *fout;
int ffmpeg_audio_decode( const char * inFile, const char * outFile)
{
// Initialize FFmpeg
av_register_all();
AVFrame* frame = avcodec_alloc_frame();
if (!frame)
{
std::cout << "Error allocating the frame" << std::endl;
return 1;
}
// you can change the file name "01 Push Me to the Floor.wav" to whatever the file is you're reading, like "myFile.ogg" or
// "someFile.webm" and this should still work
AVFormatContext* formatContext = NULL;
//if (avformat_open_input(&formatContext, "01 Push Me to the Floor.wav", NULL, NULL) != 0)
if (avformat_open_input(&formatContext, inFile, NULL, NULL) != 0)
{
av_free(frame);
std::cout << "Error opening the file" << std::endl;
return 1;
}
if (avformat_find_stream_info(formatContext, NULL) < 0)
{
av_free(frame);
av_close_input_file(formatContext);
std::cout << "Error finding the stream info" << std::endl;
return 1;
}
AVStream* audioStream = NULL;
// Find the audio stream (some container files can have multiple streams in them)
for (unsigned int i = 0; i < formatContext->nb_streams; ++i)
{
if (formatContext->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
{
audioStream = formatContext->streams[i];
break;
}
}
if (audioStream == NULL)
{
av_free(frame);
av_close_input_file(formatContext);
std::cout << "Could not find any audio stream in the file" << std::endl;
return 1;
}
AVCodecContext* codecContext = audioStream->codec;
codecContext->codec = avcodec_find_decoder(codecContext->codec_id);
if (codecContext->codec == NULL)
{
av_free(frame);
av_close_input_file(formatContext);
std::cout << "Couldn't find a proper decoder" << std::endl;
return 1;
}
else if (avcodec_open2(codecContext, codecContext->codec, NULL) != 0)
{
av_free(frame);
av_close_input_file(formatContext);
std::cout << "Couldn't open the context with the decoder" << std::endl;
return 1;
}
std::cout << "This stream has " << codecContext->channels << " channels and a sample rate of " << codecContext->sample_rate << "Hz" << std::endl;
std::cout << "The data is in the format " << av_get_sample_fmt_name(codecContext->sample_fmt) << std::endl;
//codecContext->sample_fmt = AV_SAMPLE_FMT_S16;
int64_t outChannelLayout = AV_CH_LAYOUT_MONO; //AV_CH_LAYOUT_STEREO;
AVSampleFormat outSampleFormat = AV_SAMPLE_FMT_S16; // Packed audio, non-planar (this is the most common format, and probably what you want; also, WAV needs it)
int outSampleRate = 8000;//44100;
// Note that AVCodecContext::channel_layout may or may not be set by libavcodec. Because of this,
// we won't use it, and will instead try to guess the layout from the number of channels.
SwrContext* swrContext = swr_alloc_set_opts(NULL,
outChannelLayout,
outSampleFormat,
outSampleRate,
av_get_default_channel_layout(codecContext->channels),
codecContext->sample_fmt,
codecContext->sample_rate,
0,
NULL);
if (swrContext == NULL)
{
av_free(frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
std::cout << "Couldn't create the SwrContext" << std::endl;
return 1;
}
if (swr_init(swrContext) != 0)
{
av_free(frame);
avcodec_close(codecContext);
avformat_close_input(&formatContext);
swr_free(&swrContext);
std::cout << "Couldn't initialize the SwrContext" << std::endl;
return 1;
}
fout = fopen(outFile, "wb+");
AVPacket packet;
av_init_packet(&packet);
// Read the packets in a loop
while (av_read_frame(formatContext, &packet) == 0)
{
if (packet.stream_index == audioStream->index)
{
AVPacket decodingPacket = packet;
while (decodingPacket.size > 0)
{
// Try to decode the packet into a frame
int frameFinished = 0;
int result = avcodec_decode_audio4(
codecContext,
frame,
&frameFinished,
&decodingPacket);
if (result < 0 || frameFinished == 0)
{
break;
}
unsigned char buffer[100000] = {NULL};
unsigned char* pointers[SWR_CH_MAX] = {NULL};
pointers[0] = &buffer[0];
int numSamplesOut = swr_convert(
swrContext,
pointers,
outSampleRate,
(const unsigned char**)frame->extended_data,
frame->nb_samples);
fwrite(
(short *)buffer,
sizeof(short),
(size_t)numSamplesOut,
fout);
decodingPacket.size -= result;
decodingPacket.data += result;
}
}
// You *must* call av_free_packet() after each call to av_read_frame() or else you'll leak memory
av_free_packet(&packet);
}
// Some codecs will cause frames to be buffered up in the decoding process. If the CODEC_CAP_DELAY flag
// is set, there can be buffered up frames that need to be flushed, so we'll do that
if (codecContext->codec->capabilities & CODEC_CAP_DELAY)
{
av_init_packet(&packet);
// Decode all the remaining frames in the buffer, until the end is reached
int frameFinished = 0;
while (avcodec_decode_audio4(codecContext, frame, &frameFinished, &packet) >= 0 && frameFinished)
{
}
}
// Clean up!
av_free(frame);
avcodec_close(codecContext);
av_close_input_file(formatContext);
fclose(fout);
}
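A minimal, hypothetical usage of the function above (the file names are placeholders):

int main()
{
    // Decodes and resamples the input to raw mono 8 kHz S16 PCM in out.pcm.
    return ffmpeg_audio_decode("input.mp3", "out.pcm");
}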

Related

Calling ffmpeg.c's main twice causes app crash

Calling ffmpeg.c's main function twice with FFmpeg 4.0.2 crashes the Android app (FFmpeg built as shared libs and called through JNI):
A/libc: Fatal signal 11 (SIGSEGV), code 1, fault addr 0x0 in tid 20153
It works fine with FFmpeg 3.2.5, though.
FFmpeg 4.0.2 main:
int main(int argc, char **argv) {
    int i, ret;
    int64_t ti;

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr, NULL, _IONBF, 0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    if (argc > 1 && !strcmp(argv[1], "-d")) {
        run_as_daemon = 1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avformat_network_init();

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n", program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//    if (nb_input_files == 0) {
//        av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//        exit_program(1);
//    }

    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    ffmpeg_cleanup(received_nb_signals ? 255 : main_return_code);
    return main_return_code;
}
FFmpeg 3.2.5 main:
int main(int argc, char **argv) {
    av_log(NULL, AV_LOG_WARNING, " Command start");
    int i, ret;
    int64_t ti;

    init_dynload();

    register_exit(ffmpeg_cleanup);

    setvbuf(stderr, NULL, _IONBF, 0); /* win32 runtime needs this */

    av_log_set_flags(AV_LOG_SKIP_REPEATED);
    parse_loglevel(argc, argv, options);

    if (argc > 1 && !strcmp(argv[1], "-d")) {
        run_as_daemon = 1;
        av_log_set_callback(log_callback_null);
        argc--;
        argv++;
    }

    avcodec_register_all();
#if CONFIG_AVDEVICE
    avdevice_register_all();
#endif
    avfilter_register_all();
    av_register_all();
    avformat_network_init();
    av_log(NULL, AV_LOG_WARNING, " Register to complete the codec");

    show_banner(argc, argv, options);

    /* parse options and open all input/output files */
    ret = ffmpeg_parse_options(argc, argv);
    if (ret < 0)
        exit_program(1);

    if (nb_output_files <= 0 && nb_input_files == 0) {
        show_usage();
        av_log(NULL, AV_LOG_WARNING, "Use -h to get full help or, even better, run 'man %s'\n",
               program_name);
        exit_program(1);
    }

    /* file converter / grab */
    if (nb_output_files <= 0) {
        av_log(NULL, AV_LOG_FATAL, "At least one output file must be specified\n");
        exit_program(1);
    }

//    if (nb_input_files == 0) {
//        av_log(NULL, AV_LOG_FATAL, "At least one input file must be specified\n");
//        exit_program(1);
//    }

    for (i = 0; i < nb_output_files; i++) {
        if (strcmp(output_files[i]->ctx->oformat->name, "rtp"))
            want_sdp = 0;
    }

    current_time = ti = getutime();
    if (transcode() < 0)
        exit_program(1);
    ti = getutime() - ti;
    if (do_benchmark) {
        av_log(NULL, AV_LOG_INFO, "bench: utime=%0.3fs\n", ti / 1000000.0);
    }
    av_log(NULL, AV_LOG_DEBUG, "%"PRIu64" frames successfully decoded, %"PRIu64" decoding errors\n",
           decode_error_stat[0], decode_error_stat[1]);
    if ((decode_error_stat[0] + decode_error_stat[1]) * max_error_rate < decode_error_stat[1])
        exit_program(69);

    exit_program(received_nb_signals ? 255 : main_return_code);

    // Reset globals so main() can be called again from JNI
    // (in this Android port, exit_program() returns instead of exiting the process)
    nb_filtergraphs = 0;
    nb_input_streams = 0;
    nb_input_files = 0;
    progress_avio = NULL;

    input_streams = NULL;
    nb_input_streams = 0;
    input_files = NULL;
    nb_input_files = 0;

    output_streams = NULL;
    nb_output_streams = 0;
    output_files = NULL;
    nb_output_files = 0;

    return main_return_code;
}
So what could the issue be? It seems FFmpeg 4.0.2 doesn't release something after the first command (resources, or static variables that need resetting to their initial values).
Adding the following lines from FFmpeg 3.2.5 to the end of FFmpeg 4.0.2's main function solved the problem. (I had downloaded FFmpeg 3.2.5 as part of someone's Android project, and that user had added these lines.)
nb_filtergraphs = 0;
nb_input_streams = 0;
nb_input_files = 0;
progress_avio = NULL;
input_streams = NULL;
nb_input_streams = 0;
input_files = NULL;
nb_input_files = 0;
output_streams = NULL;
nb_output_streams = 0;
output_files = NULL;
nb_output_files = 0;
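A minimal sketch of how those resets could be factored into a helper that an Android port's main() calls before returning (the function name is illustrative; the globals are the ones from ffmpeg.c / ffmpeg_opt.c shown above):

// Hypothetical helper: reset ffmpeg.c's globals so main() is re-entrant from JNI.
static void ffmpeg_reset_globals(void)
{
    nb_filtergraphs = 0;
    progress_avio   = NULL;
    input_streams   = NULL;  nb_input_streams  = 0;
    input_files     = NULL;  nb_input_files    = 0;
    output_streams  = NULL;  nb_output_streams = 0;
    output_files    = NULL;  nb_output_files   = 0;
}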

Affdex SDK Ubuntu Compilation issues

I am trying to compile the following wrapper library code to use the Affectiva library on Ubuntu:
#include <iostream>
#include <memory>
#include <chrono>
#include <fstream>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <boost/filesystem.hpp>
#include <boost/timer/timer.hpp>
#include <boost/program_options.hpp>
#include "VideoDetector.h"
#include "PhotoDetector.h"
#include "AffdexException.h"
#include "PlottingImageListener.hpp"
#include "StatusListener.hpp"

using namespace std;
using namespace affdex;

int main(int argsc, char *argsv[])
{
    std::map<boost::filesystem::path, bool> VIDEO_EXTS = { {boost::filesystem::path(".avi"), 1},
                                                           {boost::filesystem::path(".mov"), 1},
                                                           {boost::filesystem::path(".flv"), 1},
                                                           {boost::filesystem::path(".webm"), 1},
                                                           {boost::filesystem::path(".wmv"), 1},
                                                           {boost::filesystem::path(".mp4"), 1} };

    affdex::path DATA_FOLDER;
    affdex::path videoPath;

    int process_framerate = 30;
    bool draw_display = true;
    bool loop = false;
    unsigned int nFaces = 1;
    int faceDetectorMode = (int)FaceDetectorMode::LARGE_FACES;

    const int precision = 2;
    std::cerr.precision(precision);
    std::cout.precision(precision);

    namespace po = boost::program_options; // abbreviate namespace
    po::options_description description("Project for demoing the Affdex SDK VideoDetector class (processing video files).");
    description.add_options()
#ifdef _WIN32
        ("data,d", po::wvalue< affdex::path >(&DATA_FOLDER)->default_value(affdex::path(L"data"), std::string("data")), "Path to the data folder")
        ("input,i", po::wvalue< affdex::path >(&videoPath)->required(), "Video file to process")
#else // _WIN32
        ("data,d", po::value< affdex::path >(&DATA_FOLDER)->default_value(affdex::path("data"), std::string("data")), "Path to the data folder")
        ("input,i", po::value< affdex::path >(&videoPath)->required(), "Video file to process")
#endif // _WIN32
        ("pfps", po::value< int >(&process_framerate)->default_value(30), "Processing framerate.")
        ("faceMode", po::value< int >(&faceDetectorMode)->default_value((int)FaceDetectorMode::SMALL_FACES), "Face detector mode (large faces vs small faces).")
        ("numFaces", po::value< unsigned int >(&nFaces)->default_value(1), "Number of faces to be tracked.")
        ("loop", po::value< bool >(&loop)->default_value(false), "Loop over the video being processed.")
        ;

    po::variables_map args;
    try
    {
        po::store(po::command_line_parser(argsc, argsv).options(description).run(), args);
        if (args["help"].as<bool>())
        {
            std::cout << description << std::endl;
            return 0;
        }
        po::notify(args);
    }
    catch (po::error& e)
    {
        std::cerr << "ERROR: " << e.what() << std::endl << std::endl;
        std::cerr << "For help, use the -h option." << std::endl << std::endl;
        return 1;
    }

    // Parse and check the data folder (with assets)
    if (!boost::filesystem::exists(DATA_FOLDER))
    {
        std::cerr << "Data folder doesn't exist: " << std::string(DATA_FOLDER.begin(), DATA_FOLDER.end()) << std::endl;
        std::cerr << "Try specifying the folder through the command line" << std::endl;
        std::cerr << description << std::endl;
        return 1;
    }

    try
    {
        std::shared_ptr<Detector> detector;

        // Initialize out file
        boost::filesystem::path csvPath(videoPath);
        boost::filesystem::path fileExt = csvPath.extension();
        csvPath.replace_extension(".csv");
        std::ofstream csvFileStream(csvPath.c_str());
        if (!csvFileStream.is_open())
        {
            std::cerr << "Unable to open csv file " << csvPath << std::endl;
            return 1;
        }

        if (VIDEO_EXTS[fileExt]) // If it is a video file
        {
            detector = std::make_shared<VideoDetector>(process_framerate, nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
        }
        else // Otherwise it's a photo
        {
            detector = std::make_shared<PhotoDetector>(nFaces, (affdex::FaceDetectorMode) faceDetectorMode);
        }

        std::cout << "Max num of faces set to: " << detector->getMaxNumberFaces() << std::endl;
        std::string mode;
        switch (detector->getFaceDetectorMode())
        {
        case FaceDetectorMode::LARGE_FACES:
            mode = "LARGE_FACES";
            break;
        case FaceDetectorMode::SMALL_FACES:
            mode = "SMALL_FACES";
            break;
        default:
            break;
        }
        std::cout << "Face detector mode set to: " << mode << std::endl;

        shared_ptr<PlottingImageListener> listenPtr(new PlottingImageListener(csvFileStream, draw_display));

        detector->setDetectAllEmotions(true);
        detector->setDetectAllExpressions(true);
        detector->setDetectAllEmojis(true);
        detector->setDetectAllAppearances(true);
        detector->setClassifierPath(DATA_FOLDER);
        detector->setImageListener(listenPtr.get());
        detector->start(); // Initialize the detectors .. call only once

        do
        {
            shared_ptr<StatusListener> videoListenPtr = std::make_shared<StatusListener>();
            detector->setProcessStatusListener(videoListenPtr.get());

            if (VIDEO_EXTS[fileExt])
            {
                ((VideoDetector *)detector.get())->process(videoPath); // Process a video
            }
            else
            {
                // videoPath is of type std::wstring on Windows, but std::string on other platforms.
                cv::Mat img = cv::imread(std::string(videoPath.begin(), videoPath.end()));

                // Create a frame
                Frame frame(img.size().width, img.size().height, img.data, Frame::COLOR_FORMAT::BGR);
                ((PhotoDetector *)detector.get())->process(frame); // Process an image
            }

            do
            {
                if (listenPtr->getDataSize() > 0)
                {
                    std::pair<Frame, std::map<FaceId, Face> > dataPoint = listenPtr->getData();
                    Frame frame = dataPoint.first;
                    std::map<FaceId, Face> faces = dataPoint.second;

                    if (draw_display)
                    {
                        listenPtr->draw(faces, frame);
                    }

                    std::cerr << "timestamp: " << frame.getTimestamp()
                              << " cfps: " << listenPtr->getCaptureFrameRate()
                              << " pfps: " << listenPtr->getProcessingFrameRate()
                              << " faces: " << faces.size() << endl;

                    listenPtr->outputToFile(faces, frame.getTimestamp());
                }
            } while (VIDEO_EXTS[fileExt] && (videoListenPtr->isRunning() || listenPtr->getDataSize() > 0));
        } while (loop);

        detector->stop();
        csvFileStream.close();

        std::cout << "Output written to file: " << csvPath << std::endl;
    }
    catch (AffdexException ex)
    {
        std::cerr << ex.what();
    }

    return 0;
}
My command is:
g++-4.8 VidDetector.cpp -o VidDetector -std=c++11 -I/root/affdex-sdk/include -I/root/affdexUbuntu/include -I/root/sdk-samples/common -L/root/affdex-sdk/lib -l affdex-native -L/usr/lib/x86_64-linux-gnu -l boost_system -l boost_filesystem
The result is an issue with linking the boost library:
VidDetector.cpp:(.text+0x8d9): undefined reference to `boost::program_options::variables_map::variables_map()'
VidDetector.cpp:(.text+0x943): undefined reference to `boost::program_options::store(boost::program_options::basic_parsed_options<char> const&, boost::program_options::variables_map&, bool)'
VidDetector.cpp:(.text+0x9e5): undefined reference to `boost::program_options::operator<<(std::ostream&, boost::program_options::options_description const&)'
VidDetector.cpp:(.text+0xa0b): undefined reference to `boost::program_options::notify(boost::program_options::variables_map&)'
VidDetector.cpp:(.text+0xb1a): undefined reference to `boost::program_options::operator<<(std::ostream&, boost::program_options::options_description const&)'
/tmp/ccUPvUA2.o: In function `boost::program_options::error_with_option_name::~error_with_option_name()':
I am not sure how to resolve these errors. I have tried pointing g++ at the Boost .so files, but it still does not find the necessary references. What am I missing in the command that will fix this?
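For what it's worth, every unresolved symbol above lives in boost::program_options, and the link command lists only boost_system and boost_filesystem. Adding -l boost_program_options (after the source file, since ld resolves symbols left to right) should resolve them, assuming libboost_program_options is installed alongside the other Boost libs in /usr/lib/x86_64-linux-gnu:

g++-4.8 VidDetector.cpp -o VidDetector -std=c++11 -I/root/affdex-sdk/include -I/root/affdexUbuntu/include -I/root/sdk-samples/common -L/root/affdex-sdk/lib -l affdex-native -L/usr/lib/x86_64-linux-gnu -l boost_system -l boost_filesystem -l boost_program_options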

Receiving text data from a remote Android Bluetooth device on a Windows XP PC

I want to receive text data from a remote Android device over Bluetooth. I wrote a simple WinAPI server, but it doesn't respond to what the remote Android sends. The connection itself is fine: the devices paired and responded through the Control Panel Bluetooth manager, and sending and receiving that way worked. This time, I want to test it with some C++ WinAPI code. Here's my code so far; it was adapted from a CodeProject article. :)
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#include <string>
#include <tchar.h>
#include <WinSock2.h>
#include <ws2bth.h>
#include <BluetoothAPIs.h>

using namespace std;

int _tmain(int argc, _TCHAR* argv[])
{
    WORD wVersionRequested = 0x202;
    WSADATA m_data;
    if (0 == WSAStartup(wVersionRequested, &m_data))
    {
        SOCKET s = socket(AF_BTH, SOCK_STREAM, BTHPROTO_RFCOMM);
        const DWORD lastError = GetLastError();
        if (s == INVALID_SOCKET)
        {
            ostringstream stream;
            stream << lastError;
            string data = stream.str();
            printf("Failed to get bluetooth socket! %s\n", data.c_str());
            return 1;
        }

        WSAPROTOCOL_INFO protocolInfo;
        int protocolInfoSize = sizeof(protocolInfo);
        if (0 != getsockopt(s, SOL_SOCKET, SO_PROTOCOL_INFO,
                            (char*)&protocolInfo, &protocolInfoSize))
        {
            return 1;
        }

        SOCKADDR_BTH address;
        address.addressFamily = AF_BTH;
        address.btAddr = 0;
        address.serviceClassId = GUID_NULL;
        address.port = BT_PORT_ANY;
        sockaddr *pAddr = (sockaddr*)&address;

        if (0 != bind(s, pAddr, sizeof(SOCKADDR_BTH)))
        {
            ostringstream stream;
            stream << GetLastError();
            string data = stream.str();
            printf("%s\n", data.c_str());
        }
        else
        {
            printf("\nBinding Successful....\n");
            int length = sizeof(SOCKADDR_BTH);
            getsockname(s, (sockaddr*)&address, &length);
            wprintf(L"Local Bluetooth device is %04x%08x \nServer channel = %d\n",
                    GET_NAP(address.btAddr), GET_SAP(address.btAddr), address.port);
        }

        int size = sizeof(SOCKADDR_BTH);
        if (0 != getsockname(s, pAddr, &size))
        {
            ostringstream stream;
            stream << GetLastError();
            string data = stream.str();
            printf("%s\n", data.c_str());
        }

        if (0 != listen(s, 10))
        {
            ostringstream stream;
            stream << GetLastError();
            string data = stream.str();
            printf("%s\n", data.c_str());
        }

        WSAQUERYSET service;
        memset(&service, 0, sizeof(service));
        service.dwSize = sizeof(service);
        // service.lpszServiceInstanceName = reinterpret_cast<LPWSTR>(_T("Accelerometer Data..."));
        // service.lpszComment = reinterpret_cast<LPWSTR>(_T("Pushing data to PC"));

        GUID serviceID = OBEXFileTransferServiceClass_UUID;
        service.lpServiceClassId = &serviceID;
        service.dwNumberOfCsAddrs = 1;
        service.dwNameSpace = NS_BTH;

        CSADDR_INFO csAddr;
        memset(&csAddr, 0, sizeof(csAddr));
        csAddr.LocalAddr.iSockaddrLength = sizeof(SOCKADDR_BTH);
        csAddr.LocalAddr.lpSockaddr = pAddr;
        csAddr.iSocketType = SOCK_STREAM;
        csAddr.iProtocol = BTHPROTO_RFCOMM;
        service.lpcsaBuffer = &csAddr;

        if (0 != WSASetService(&service, RNRSERVICE_REGISTER, 0))
        {
            printf("Service registration failed....");
            ostringstream stream;
            stream << GetLastError();
            string data = stream.str();
            printf("%s\n", data.c_str());
        }
        else
        {
            printf("\nService registration Successful....\n");
        }

        HANDLE hRadio;
        BLUETOOTH_FIND_RADIO_PARAMS btfrp = { sizeof(BLUETOOTH_FIND_RADIO_PARAMS) };
        HBLUETOOTH_RADIO_FIND hFind = BluetoothFindFirstRadio(&btfrp, &hRadio);
        if ((hFind != NULL) && (hRadio != NULL))
        {
            if (BluetoothEnableDiscovery(hRadio, TRUE)) {
                printf("BluetoothEnableDiscovery() is working!\n");
            }
            if (BluetoothEnableIncomingConnections(hRadio, TRUE)) {
                printf("BluetoothEnableIncomingConnections() is working!\n");
            }
            if (BluetoothIsConnectable(hRadio)) {
                printf("BluetoothIsConnectable() is working!\n");
            }
        }

        printf("\nBefore accept.........");
        SOCKADDR_BTH sab2;
        int ilen = sizeof(sab2);
        SOCKET s2 = accept(s, (sockaddr*)&sab2, &ilen);
        if (s2 == INVALID_SOCKET)
        {
            wprintf(L"accept() failed, error %d\n", WSAGetLastError());
        }
        wprintf(L"\nConnection came from %04x%08x to channel %d\n",
                GET_NAP(sab2.btAddr), GET_SAP(sab2.btAddr), sab2.port);
        wprintf(L"\nAfter Accept\n");

        char buffer[1024] = {0};
        memset(buffer, 0, sizeof(buffer));
        int r = recv(s2, (char*)buffer, sizeof(buffer), 0);
        printf("%s\n", buffer);
        closesocket(s2);

        if (0 != WSASetService(&service, RNRSERVICE_DELETE, 0))
        {
            ostringstream stream;
            stream << GetLastError();
            string data = stream.str();
            printf("%s\n", data.c_str());
        }
        closesocket(s);
        WSACleanup();
        return 0;
    }
}
I use a Microsoft BT device, by the way.
Just to be sure: you said you're on WinXP... OK, but is it WinXP SP3 with the Microsoft BT stack and an appropriate dongle? Or another XP version with another BT stack (like Widcomm or BlueSoleil)?
In the latter case, the BT socket API won't work...
Otherwise, not much to say about the code, except a missing BluetoothFindRadioClose...
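A minimal sketch of that missing cleanup, for reference. BluetoothFindFirstRadio hands back both a find handle and a radio handle, and each needs its own close call:

// After you're done with the radio found by BluetoothFindFirstRadio():
if (hRadio != NULL)
    CloseHandle(hRadio);            // release the radio handle
if (hFind != NULL)
    BluetoothFindRadioClose(hFind); // release the enumeration handle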

Choppy audio decoding when using swr_convert in Android

I'm using the following C function to decode packets on Android (with JNI). When I play an MP3 file the code works fine; a WMA file, however, results in choppy audio. I suspect the issue may be with the swr_convert call and the data_size I'm using, but I'm not sure. Does anyone know why this is happening?
int decodeFrameFromPacket(AVPacket *aPacket)
{
    AVFrame *decoded_frame = NULL;
    int got_frame = 0;

    if (aPacket->stream_index == global_audio_state->audio_stream) {
        if (!decoded_frame) {
            if (!(decoded_frame = avcodec_alloc_frame())) {
                __android_log_print(ANDROID_LOG_INFO, TAG, "Could not allocate audio frame\n");
                return -2;
            }
        }

        if (avcodec_decode_audio4(global_audio_state->audio_st->codec, decoded_frame, &got_frame, aPacket) < 0) {
            __android_log_print(ANDROID_LOG_INFO, TAG, "Error while decoding\n");
            return -2;
        }

        int data_size = 0;
        if (got_frame) {
            /* If a frame has been decoded, output it.
               Note: this size is computed from the decoder's *input* sample format
               (codec->sample_fmt), not from what swr_convert produces. */
            data_size = av_samples_get_buffer_size(NULL, global_audio_state->audio_st->codec->channels,
                                                   decoded_frame->nb_samples,
                                                   global_audio_state->audio_st->codec->sample_fmt, 1);
        }

        swr_convert(global_audio_state->swr, (uint8_t **) &gAudioFrameRefBuffer, decoded_frame->nb_samples, (uint8_t const **) decoded_frame->data, decoded_frame->nb_samples);
        avcodec_free_frame(&decoded_frame);

        gAudioFrameDataLengthRefBuffer[0] = data_size;
        return AUDIO_DATA_ID;
    }

    return 0;
}
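One thing that stands out: data_size is computed from the decoder's sample format, but the buffer handed to the player holds swr_convert's output. MP3 typically decodes straight to S16, so the two happen to match; WMA typically decodes to a float (planar) format, so the reported byte count is wrong for the converted buffer and playback turns choppy. A sketch of sizing from the converter's output instead, assuming the SwrContext converts to interleaved S16 and keeps the channel count (adjust to your actual output parameters):

/* Sketch: size the output buffer from what swr_convert actually produced. */
int out_samples = swr_convert(global_audio_state->swr,
                              (uint8_t **) &gAudioFrameRefBuffer,
                              decoded_frame->nb_samples,
                              (uint8_t const **) decoded_frame->data,
                              decoded_frame->nb_samples);
if (out_samples >= 0) {
    /* Assumed output format: interleaved S16, same channel count as the codec. */
    data_size = av_samples_get_buffer_size(NULL,
                                           global_audio_state->audio_st->codec->channels,
                                           out_samples,
                                           AV_SAMPLE_FMT_S16, 1);
}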

Create video from images

Is there a way to create a video from a series of images on Android? Maybe a way to extend MediaRecorder so it can take images as input.
I want to actually create the video and store it (as an MPEG-4 file, for instance).
Thanks for any suggestions.
I'm also trying to do the same thing. I have been advised to use Libav.
http://libav.org/
However, I need to build it with the NDK, and I currently have some issues doing that.
I'm looking for documentation about it. I'll keep you posted.
I've created a post about it: Libav build for Android
You can use AnimationDrawable in an ImageView.
Add frames using the AnimationDrawable.addFrame(Drawable frame, int duration) method, and start the animation using AnimationDrawable.start().
Not sure if that's ideal, but it would work.
I use Android + NDK:
AVFrame* OpenImage(const char* imageFileName)
{
    AVFormatContext *pFormatCtx = avformat_alloc_context();
    std::cout << "1" << imageFileName << std::endl;
    if (avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL) < 0)
    {
        printf("Can't open image file '%s'\n", imageFileName);
        return NULL;
    }
    std::cout << "2" << std::endl;
    av_dump_format(pFormatCtx, 0, imageFileName, false);

    AVCodecContext *pCodecCtx;
    std::cout << "3" << std::endl;
    pCodecCtx = pFormatCtx->streams[0]->codec;
    pCodecCtx->width = W_VIDEO;
    pCodecCtx->height = H_VIDEO;
    //pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;

    // Find the decoder for the video stream
    AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (!pCodec)
    {
        printf("Codec not found\n");
        return NULL;
    }

    // Open codec (the NULL is an AVDictionary **options)
    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
    {
        printf("Could not open codec\n");
        return NULL;
    }
    std::cout << "4" << std::endl;

    AVFrame *pFrame = av_frame_alloc();
    if (!pFrame)
    {
        printf("Can't allocate memory for AVFrame\n");
        return NULL;
    }
    printf("here");

    int frameFinished;
    int numBytes;

    // Determine required buffer size and allocate buffer
    numBytes = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
    uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
    avpicture_fill((AVPicture *) pFrame, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

    // Read frame
    AVPacket packet;
    int framesNumber = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0)
    {
        if (packet.stream_index != 0)
            continue;
        int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
        if (ret > 0)
        {
            printf("Frame is decoded, size %d\n", ret);
            pFrame->quality = 4;
            return pFrame;
        }
        else
            printf("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
    }
    // No frame could be decoded
    return NULL;
}
int combine_images_to_video(const char *infile_dir, const char *infile_prefix, const char *infile_surname, int total_frames, const char *outfile)
{
    if (total_frames <= 0) {
        std::cout << "Usage: cv2ff <dir_name> <prefix> <image surname> <total frames> <outfile>" << std::endl;
        std::cout << "Please check that the 4th argument is an integer value of total frames" << std::endl;
        return 1;
    }
    printf("max %d frames\n", total_frames);

    char *imageFileName;
    char numberChar[NUMNUMBER];

    // initialize FFmpeg library
    av_register_all();
    // av_log_set_level(AV_LOG_DEBUG);

    int ret;
    const int dst_width = W_VIDEO;
    const int dst_height = H_VIDEO;
    const AVRational dst_fps = {30, 1}; // {fps, 1}

    // open output format context
    AVFormatContext* outctx = nullptr;
    ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);
    if (ret < 0) {
        std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
        return 2;
    }

    // open output IO context
    ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avio_open2: ret=" << ret;
        return 2;
    }

    // create new video stream
    AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
    AVStream* vstrm = avformat_new_stream(outctx, vcodec);
    if (!vstrm) {
        std::cerr << "fail to avformat_new_stream";
        return 2;
    }
    avcodec_get_context_defaults3(vstrm->codec, vcodec);
    vstrm->codec->width = dst_width;
    vstrm->codec->height = dst_height;
    vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
    vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
    vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps;
    if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
        vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;

    // open video encoder
    ret = avcodec_open2(vstrm->codec, vcodec, nullptr);
    if (ret < 0) {
        std::cerr << "fail to avcodec_open2: ret=" << ret;
        return 2;
    }

    std::cout
        << "outfile: " << outfile << "\n"
        << "format: " << outctx->oformat->name << "\n"
        << "vcodec: " << vcodec->name << "\n"
        << "size: " << dst_width << 'x' << dst_height << "\n"
        << "fps: " << av_q2d(dst_fps) << "\n"
        << "pixfmt: " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n"
        << std::flush;

    // initialize sample scaler
    // NB: the source format is fixed to BGR24 here; the decoded images must actually be in that format
    SwsContext* swsctx = sws_getCachedContext(
        nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24,
        dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
    if (!swsctx) {
        std::cerr << "fail to sws_getCachedContext";
        return 2;
    }

    // allocate frame buffer for encoding
    AVFrame* frame = av_frame_alloc();
    std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height));
    avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height);
    frame->width = dst_width;
    frame->height = dst_height;
    frame->format = static_cast<int>(vstrm->codec->pix_fmt);

    // encoding loop
    avformat_write_header(outctx, nullptr);
    int64_t frame_pts = 0;
    unsigned nb_frames = 0;
    bool end_of_stream = false;
    int got_pkt = 0;
    int i = 0;
    imageFileName = (char *)malloc(strlen(infile_dir) + strlen(infile_prefix) + NUMNUMBER + strlen(infile_surname) + 1);
    do {
        AVFrame* enc_frame = nullptr;
        if (i < total_frames) {
            // build "<dir><prefix>NNN<surname>", e.g. /sdcard/frames/img001.jpg
            strcpy(imageFileName, infile_dir);
            strcat(imageFileName, infile_prefix);
            sprintf(numberChar, "%03d", i + 1);
            strcat(imageFileName, numberChar);
            strcat(imageFileName, infile_surname);
            __android_log_print(1, "RecordingImage", "%s", imageFileName);
            std::cout << imageFileName << std::endl;

            AVFrame* frame_from_file = OpenImage(imageFileName);
            if (!frame_from_file) {
                std::cout << "error OpenImage" << std::endl;
                return 5;
            }
            // use the decoded frame's own line sizes as the source strides
            sws_scale(swsctx, frame_from_file->data, frame_from_file->linesize, 0, frame_from_file->height, frame->data, frame->linesize);
            frame->pts = frame_pts++;
            av_frame_free(&frame_from_file);
            enc_frame = frame;
            i++;
        } else {
            end_of_stream = true; // no more images: flush delayed packets out of the encoder
        }

        // encode video frame (a null frame flushes the encoder)
        AVPacket pkt;
        pkt.data = nullptr;
        pkt.size = 0;
        av_init_packet(&pkt);
        ret = avcodec_encode_video2(vstrm->codec, &pkt, enc_frame, &got_pkt);
        if (ret < 0) {
            std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n";
            return 2;
        }
        if (got_pkt) {
            // rescale packet timestamp and write it
            pkt.duration = 1;
            av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base);
            av_write_frame(outctx, &pkt);
            std::cout << nb_frames << '\r' << std::flush; // dump progress
            ++nb_frames;
        }
        av_free_packet(&pkt);
    } while (!end_of_stream || got_pkt);
    av_write_trailer(outctx);
    std::cout << nb_frames << " frames encoded" << std::endl;

    av_frame_free(&frame);
    avcodec_close(vstrm->codec);
    avio_close(outctx->pb);
    avformat_free_context(outctx);
    free(imageFileName);
    return 0;
}
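Hypothetical usage, assuming W_VIDEO, H_VIDEO, and NUMNUMBER are defined to match your images (the names and paths below are placeholders):

// Encodes /sdcard/frames/img001.jpg ... img120.jpg into /sdcard/out.mp4
combine_images_to_video("/sdcard/frames/", "img", ".jpg", 120, "/sdcard/out.mp4");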
We can create a video from images using FFmpeg.
Check out my post on using FFmpeg in Android.
Use the command below to create a video from images placed in the same folder:
String command[] = {"-y", "-r", "1/5", "-i", src.getAbsolutePath(),
        "-c:v", "libx264", "-vf", "fps=25", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
Here, src.getAbsolutePath() is the absolute path (pattern) of your input images.
For example, if all your images are stored in an Images folder inside the Pictures directory, with names extract_picture001.jpg, extract_picture002.jpg, extract_picture003.jpg, and so on, then:
String filePrefix = "extract_picture";
String fileExtn = ".jpg";
File picDir = Environment.getExternalStoragePublicDirectory(
        Environment.DIRECTORY_PICTURES);
File dir = new File(picDir, "Images");
File src = new File(dir, filePrefix + "%03d" + fileExtn);
To create a video from images placed in different folders, you have to create a text file, add the image paths to it, and then specify the path of that text file as an input option.
Example:
Text File
file '/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg'
duration 2
file '/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg'
duration 5
file '/storage/emulated/0/DCIM/Camera/P_20170305_142939.jpg'
duration 6
file '/storage/emulated/0/DCIM/Pic/P_20170305_142818.jpg'
duration 2
Command
String command[] = {"-y", "-f", "concat", "-safe", "0", "-i", textFile.getAbsolutePath(), "-vsync", "vfr", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
where textFile.getAbsolutePath() is the absolute path of your text file.
Check out the ffmpeg documentation on the concat demuxer for more info.
