Start Linphone MediaStreamer2 Audio & Video Streams Together - Android

https://github.com/BelledonneCommunications/mediastreamer2
https://github.com/BelledonneCommunications/linphone-android
Using only the mediastreamer2 library, I am able to start an audio call with a given remote IP and port by calling the respective methods in audiostream.c.
I then needed to start a video call as well, so I initialised videostream.c, called its respective methods, and provided it with surfaces to render the remote and local camera feeds. With a remote IP and port given, I am able to start the video stream successfully.
The problem is when I start both streams together: the sound stops and the video streaming also stops; only the local camera feed works.
I have one method that does all of this for me. If I comment out its video part, the audio call works fine; if I comment out the audio part, the video call works fine. But when I start both, there is no sound and no streaming, even though we still get the "AudioStream started successfully" and "VideoStream started successfully" logs.
Can someone with Linphone experience help us figure out the correct sequence of methods, or what we are doing wrong? Here is our method.
JNIEXPORT jint JNICALL Java_com_myapp_services_LinPhoneMSEngine_LinPhoneMSVE_1AudioStreamStartFull
(JNIEnv *env, jclass self, jstring remote_ip, jint remote_port, jint localport, jint payloadindex, jboolean isLowEnd)
{
    int bRetVal = 0;
    MSVideoSize size = {320, 240};
    char rtcp_tool[128] = {0};
    int ret;
    //jboolean copy;
    char cname[128] = {0};
    const char *cremote_ip;

    ortp_warning("Audio Stream Start Full");
    LOGD("Audio Stream Start Full");

    cremote_ip = (*env)->GetStringUTFChars(env, remote_ip, NULL);
    ortp_warning("Cremote_ip= %s", cremote_ip);
    LOGD("Cremote_ip= %s", cremote_ip);

    // ms_filter_enable_statistics(TRUE);
    veData->queue = ortp_ev_queue_new();

    veData->soundCard = NULL;
    set_playback_device();
    ortp_warning("sound: playback_dev_id: %s", ms_snd_card_get_string_id(veData->soundCard));
    LOGD("sound: playback_dev_id: %s", ms_snd_card_get_string_id(veData->soundCard));

    veData->CaptureCard = NULL;
    set_capture_device();
    ortp_warning("sound: capture_dev_id: %s", ms_snd_card_get_string_id(veData->CaptureCard));
    LOGD("sound: capture_dev_id: %s", ms_snd_card_get_string_id(veData->CaptureCard));

    // init audio stream (NULL check moved up here, before the stream is used)
    veData->audioStream = audio_stream_new(msFactory, localport, localport + 1, false);
    if (veData->audioStream == NULL) {
        ortp_warning("AudioStream is Null");
        LOGD("AudioStream is Null");
        bRetVal = -1;
        return -1;
    }
    audio_stream_enable_adaptive_bitrate_control(veData->audioStream, true);
    audio_stream_enable_adaptive_jittcomp(veData->audioStream, true);
    rtp_session_set_jitter_compensation(veData->audioStream->ms.sessions.rtp_session, 50);
    rtp_session_enable_rtcp_mux(veData->audioStream->ms.sessions.rtp_session, true);

    ret = AUDIO_STREAM_FEATURE_VOL_SND | AUDIO_STREAM_FEATURE_VOL_RCV;
    if (!isLowEnd)
    {
        ret = ret | AUDIO_STREAM_FEATURE_EC | AUDIO_STREAM_FEATURE_EQUALIZER | AUDIO_STREAM_FEATURE_DTMF | AUDIO_STREAM_FEATURE_DTMF_ECHO;
        audio_stream_set_features(veData->audioStream, ret);
        ortp_warning("Setting Echo Canceller params");
        LOGD("Setting Echo Canceller params");
        rtp_session_enable_jitter_buffer(veData->audioStream->ms.sessions.rtp_session, TRUE);
        audio_stream_set_echo_canceller_params(veData->audioStream, 60, 0, 128);
        audio_stream_enable_gain_control(veData->audioStream, true);
        audio_stream_enable_automatic_gain_control(veData->audioStream, true);
    }
    else
    {
        audio_stream_set_features(veData->audioStream, ret);
        ortp_warning("No Echo Canceller params!!");
        LOGD("No Echo Canceller params!!");
        rtp_session_enable_jitter_buffer(veData->audioStream->ms.sessions.rtp_session, FALSE);
    }

    audio_stream_play_received_dtmfs(veData->audioStream, true);
    snprintf(rtcp_tool, sizeof(rtcp_tool) - 1, "%s-%s", "Android", "2.8.0");
    snprintf(cname, sizeof(cname) - 1, "%s-%d", cremote_ip, remote_port);
    ortp_warning("cname value: %s", cname);
    LOGD("cname value: %s", cname);

    audio_stream_prepare_sound(veData->audioStream, veData->soundCard, veData->CaptureCard);
    if (0 == audio_stream_start_full(veData->audioStream, veData->prof,
                                     cremote_ip, remote_port,
                                     cremote_ip, remote_port + 1,
                                     114, 50, NULL, NULL,
                                     veData->soundCard, veData->CaptureCard, !isLowEnd))
    {
        veData->rtpSession = veData->audioStream->ms.sessions.rtp_session;
        ortp_warning("AudioStreamStartFull Success");
        post_audio_config(veData->audioStream);
        audio_stream_set_rtcp_information(veData->audioStream, cname, rtcp_tool);
    }
    else
    {
        ortp_warning("AudioStream start failed");
        bRetVal = -1;
    }

    // init video stream
    veData->videoStream = video_stream_new(msFactory, localport, localport + 1, false);
    video_stream_enable_adaptive_bitrate_control(veData->videoStream, true);
    video_stream_enable_adaptive_jittcomp(veData->videoStream, true);
    rtp_session_enable_rtcp_mux(veData->videoStream->ms.sessions.rtp_session, true);
    video_stream_use_video_preset(veData->videoStream, "custom");
    video_stream_set_sent_video_size(veData->videoStream, size);
    video_stream_set_preview_size(veData->videoStream, size);
    video_stream_enable_self_view(veData->videoStream, TRUE);
    ortp_message("Video Stream : [%p] & native window id : [%p]", veData->videoStream, veData->native_window_id);
    video_stream_set_native_window_id(veData->videoStream, veData->native_window_id);
    ortp_message("Video Stream : [%p] & preview window id : [%p]", veData->videoStream, veData->native_preview_window_id);
    video_stream_set_native_preview_window_id(veData->videoStream, veData->native_preview_window_id);
    video_stream_use_preview_video_window(veData->videoStream, TRUE);
    video_stream_set_device_rotation(veData->videoStream, 0);
    video_stream_set_fps(veData->videoStream, 10.0);

    // link audio with video
    audio_stream_link_video(veData->audioStream, veData->videoStream);
    ms_message("Setting webcam as %p", veData->msWebCam);

    if (bRetVal != -1 && video_stream_start(veData->videoStream, veData->prof,
                                            cremote_ip, remote_port,
                                            cremote_ip, remote_port + 1,
                                            101, 60, veData->msWebCam) >= 0) {
        ortp_warning("VideoStream started successfully");
        veData->rtpSession = veData->videoStream->ms.sessions.rtp_session;
        video_stream_set_rtcp_information(veData->videoStream, cname, rtcp_tool);
    }
    else
    {
        ortp_warning("VideoStream start failed");
        bRetVal = -1;
    }

    (*env)->ReleaseStringUTFChars(env, remote_ip, cremote_ip);
    return bRetVal;
}

Okay, finally, with the help of #belledonne-communications, we figured it out: we were sending both streams on the same port, which is not possible. Each stream needs its own pair of ports. We corrected it and it worked.
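For reference, here is a minimal sketch of the corrected port layout, reusing the names from the function above. The +2/+3 offsets are just one possible convention (audio RTP/RTCP on localport/localport+1, video RTP/RTCP on localport+2/localport+3); in a real call the video ports would normally come from signalling rather than a fixed offset:

/* Audio keeps its own RTP/RTCP pair. */
veData->audioStream = audio_stream_new(msFactory, localport, localport + 1, false);
/* ... other audio setup as above ... */
audio_stream_start_full(veData->audioStream, veData->prof,
                        cremote_ip, remote_port,      /* remote audio RTP  */
                        cremote_ip, remote_port + 1,  /* remote audio RTCP */
                        114, 50, NULL, NULL,
                        veData->soundCard, veData->CaptureCard, !isLowEnd);

/* Video gets a different pair of ports instead of reusing localport/remote_port. */
veData->videoStream = video_stream_new(msFactory, localport + 2, localport + 3, false);
/* ... other video setup as above ... */
video_stream_start(veData->videoStream, veData->prof,
                   cremote_ip, remote_port + 2,       /* remote video RTP  */
                   cremote_ip, remote_port + 3,       /* remote video RTCP */
                   101, 60, veData->msWebCam);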

Related

audio-echo project issue when writing out data

I am working on a karaoke Android app; here is the example:
https://github.com/koseonjae/Karaoke
It is based on Google's audio-echo sample. Please take a look at this chunk:
if(fos == nullptr)
    fos = fopen("/sdcard/recorded_audio.pcm", "wb");
fwrite(buf->buf_, 1, buf->size_, fos);
This chunk of code works on my Samsung S7, but crashes on other devices, so I changed it to save buf into a vector and write it out when recording has stopped:
bool EngineService(void *ctx, uint32_t msg, void *data) {
    assert(ctx == &engine);
    switch (msg) {
        case ENGINE_SERVICE_MSG_RETRIEVE_DUMP_BUFS: {
            *(static_cast<uint32_t *>(data)) = dbgEngineGetBufCount();
            break;
        }
        case ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE: {
            // adding audio delay effect
            sample_buf *buf = static_cast<sample_buf *>(data);
            assert(engine.fastPathFramesPerBuf_ ==
                   buf->size_ / engine.sampleChannels_ / (engine.bitsPerSample_ / 8));
            engine.delayEffect_->process(reinterpret_cast<int16_t *>(buf->buf_),
                                         engine.fastPathFramesPerBuf_);
            // TODO error crash in some device that has low speed sdcard -> using vector for save data
            /*if(fos == nullptr)
                fos = fopen("/sdcard/recorded_audio.pcm", "wb");
            fwrite(buf->buf_, 1, buf->size_, fos);*/
            sample_buf output = sample_buf();
            output.buf_ = new uint8_t();
            *output.buf_ = *buf->buf_;
            output.size_ = buf->size_;
            bucket.push_back(output);
            break;
        }
        default:
            assert(false);
            return false;
    }
    return true;
}
In the stop function stopPlay(JNIEnv *env, jclass type) I added:
if(fos == nullptr)
    fos = fopen("/sdcard/recorded_audio.pcm", "wb");
LOGE("====stopPlay %u", bucket.size());
for(auto const& value: bucket) {
    fwrite(value.buf_, 1, value.size_, fos);
}
fclose(fos);
fos = nullptr;
I don't know why the fwrite that writes directly to the sdcard works inside EngineService, yet when I write my vector bucket out, the write succeeds but I cannot hear anything - just a beep-beep sound...
Can someone explain it? Is something wrong here?
P.S.: I think writing to memory, like a vector, is faster than writing to the sdcard - is that right? But it is not working.
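One thing worth checking in the vector version, and possibly the cause of the beeping: output.buf_ = new uint8_t(); allocates a single byte, and *output.buf_ = *buf->buf_; copies only that one byte, so bucket never actually holds the recorded PCM. A rough sketch of a full deep copy (reusing the buf_/size_ fields from the snippet above; the delete[] in stopPlay is needed to release the copies afterwards):

#include <cstring>   // std::memcpy

// Inside ENGINE_SERVICE_MSG_RECORDED_AUDIO_AVAILABLE, after the delay effect:
sample_buf output = sample_buf();
output.size_ = buf->size_;
output.buf_  = new uint8_t[buf->size_];          // allocate the whole buffer, not one byte
std::memcpy(output.buf_, buf->buf_, buf->size_); // deep-copy the recorded PCM
bucket.push_back(output);

// And in stopPlay, after the fwrite loop:
for (auto const &value : bucket) delete[] value.buf_;
bucket.clear();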

Cannot play a video from internal storage with the native codec given by Google using adb

I am using the native codec app given by Google: (https://github.com/googlesamples/android-ndk/tree/master/native-codec).
The app has a folder (assets) which contains some video samples to play.
My goal is to read videos from the phone's internal storage (e.g. /sdcard/filename.mp4).
I added these two lines to the manifest file, but this hasn't fixed the issue yet.
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
I modified the code to get the video filename as an argument given by adb shell.
Here is the code:
mSourceString = getIntent().getStringExtra("arg");
if (!mCreated) {
    if (mSourceString != null) {
        mCreated = createStreamingMediaPlayer(getResources().getAssets(), mSourceString);
    }
}
if (mCreated) {
    mIsPlaying = !mIsPlaying;
    setPlayingStreamingMediaPlayer(mIsPlaying);
}
The native code of the method which reads the video filename is the following:
jboolean Java_com_example_mohammed_myapplication_MainActivity_createStreamingMediaPlayer(JNIEnv* env, jclass clazz, jobject assetMgr, jstring filename)
{
    LOGV("### create");

    // convert Java string to UTF-8
    const char *utf8 = env->GetStringUTFChars(filename, NULL);
    LOGV("opening %s", utf8);

    off_t outStart, outLen;
    int fd = AAsset_openFileDescriptor(AAssetManager_open(AAssetManager_fromJava(env, assetMgr), utf8, 0),
                                       &outStart, &outLen);
    env->ReleaseStringUTFChars(filename, utf8);
    if (fd < 0) {
        LOGE("failed to open file: %s %d (%s)", utf8, fd, strerror(errno));
        return JNI_FALSE;
    }

    data.fd = fd;
    workerdata *d = &data;

    AMediaExtractor *ex = AMediaExtractor_new();
    media_status_t err = AMediaExtractor_setDataSourceFd(ex, d->fd,
                                                         static_cast<off64_t>(outStart),
                                                         static_cast<off64_t>(outLen));
    close(d->fd);
    if (err != AMEDIA_OK) {
        LOGV("setDataSource error: %d", err);
        return JNI_FALSE;
    }

    int numtracks = AMediaExtractor_getTrackCount(ex);
    AMediaCodec *codec = NULL;
    LOGV("input has %d tracks", numtracks);
    for (int i = 0; i < numtracks; i++) {
        AMediaFormat *format = AMediaExtractor_getTrackFormat(ex, i);
        const char *s = AMediaFormat_toString(format);
        LOGV("track %d format: %s", i, s);
        const char *mime;
        if (!AMediaFormat_getString(format, AMEDIAFORMAT_KEY_MIME, &mime)) {
            LOGV("no mime type");
            return JNI_FALSE;
        } else if (!strncmp(mime, "video/", 6)) {
            // Omitting most error handling for clarity.
            // Production code should check for errors.
            AMediaExtractor_selectTrack(ex, i);
            codec = AMediaCodec_createDecoderByType(mime);
            AMediaCodec_configure(codec, format, d->window, NULL, 0);
            d->ex = ex;
            d->codec = codec;
            d->renderstart = -1;
            d->sawInputEOS = false;
            d->sawOutputEOS = false;
            d->isPlaying = false;
            d->renderonce = true;
            AMediaCodec_start(codec);
        }
        AMediaFormat_delete(format);
    }

    mlooper = new mylooper();
    mlooper->post(kMsgCodecBuffer, d);

    return JNI_TRUE;
}
The app plays the videos successfully when they are in the "assets" folder, i.e. inside the app, but when a video is outside the app (in internal/external storage) the app stops working.
Is there a solution for this issue?
Apart from adding the storage permissions to the manifest, the user also needs to grant the permission at runtime.
For testing purposes, you can go to Settings -> Apps -> your app -> Permissions -> enable the storage permission; it should work fine then.
For production, you should request the permission via a dialog; there are plenty of tutorials for that.
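Separately from permissions: AAssetManager_open only resolves names inside the APK's assets, so it cannot open an absolute path like /sdcard/filename.mp4 even when the storage permission is granted. A rough sketch of feeding such a file to the extractor through a plain file descriptor instead (AMediaExtractor_setDataSourceFd is the same NDK call the sample already uses; the helper name here is made up):

#include <fcntl.h>
#include <sys/stat.h>
#include <unistd.h>
#include <media/NdkMediaExtractor.h>

// Hypothetical helper: open a file that lives outside the APK and hand it to the extractor.
static AMediaExtractor *createExtractorFromPath(const char *path) {
    int fd = open(path, O_RDONLY);
    if (fd < 0) return NULL;                 // e.g. storage permission not granted

    struct stat st;
    if (fstat(fd, &st) != 0) { close(fd); return NULL; }

    AMediaExtractor *ex = AMediaExtractor_new();
    media_status_t err = AMediaExtractor_setDataSourceFd(ex, fd, 0, st.st_size);
    close(fd);                               // the sample also closes its fd right after this call
    if (err != AMEDIA_OK) { AMediaExtractor_delete(ex); return NULL; }
    return ex;                               // track selection/decoding continues as in the sample
}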

Simple encoder Gstreamer on Android

I am trying to make a simple demo with an encoder in GStreamer 1.0. However, it does not work: calling "push-buffer" always returns an error.
#define BUFFER_SIZE 1228800 // 640 x 480 x 4 byte/pixel

static void cb_need_video_data (GstElement *appsrc, guint unused_size, CustomData* user_data)
{
    static gboolean white = FALSE;
    static GstClockTime timestamp = 0;
    GstBuffer *buffer;
    guint size = BUFFER_SIZE;
    GstFlowReturn ret;

    user_data->num_frame++;
    if (user_data->num_frame >= TOTAL_FRAMES) {
        /* we are EOS, send end-of-stream and remove the source */
        g_signal_emit_by_name (user_data->source, "end-of-stream", &ret);
        g_print ("start TOTAL_FRAMES");
        return;
    }

    buffer = gst_buffer_new_allocate (NULL, size, NULL);
    gst_buffer_memset (buffer, 0, white ? 0xff : 0x0, size);
    white = !white;

    GST_BUFFER_PTS (buffer) = timestamp;
    GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 2);
    timestamp += GST_BUFFER_DURATION (buffer);

    g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
    if (ret != GST_FLOW_OK) {
        /* something wrong, stop pushing */
        GST_DEBUG ("QUIT MAIN LOOP %d", ret);
        g_main_loop_quit (user_data->main_loop);
    }
}
void gst_set_pipeline(JNIEnv* env, jobject thiz, jstring uri) {
    CustomData *data = GET_CUSTOM_DATA (env, thiz, custom_data_field_id);
    gchar *string = NULL;
    string = g_strdup_printf ("appsrc is-live=true name=testsource ! "
        "queue ! videoconvert ! videoscale add-borders=true ! videorate ! x264enc "
        "! mpegtsmux ! filesink location=/storage/emulated/0/test.avi sync=false append=true");
    data->pipeline = gst_parse_launch (string, NULL);
    g_free (string);
    if (data->pipeline == NULL) {
        g_print ("Bad sink\n");
        return;
    }
    data->vsource = gst_bin_get_by_name (GST_BIN (data->pipeline), "testsource");

    GstCaps *caps;
    caps = gst_caps_new_simple ("video/x-raw",
        "format", G_TYPE_STRING, "RGB",
        "width", G_TYPE_INT, 640,
        "height", G_TYPE_INT, 480,
        "framerate", GST_TYPE_FRACTION, 25, 1,
        NULL);
    gst_app_src_set_caps (GST_APP_SRC (data->vsource), caps);
    g_object_set (G_OBJECT (data->vsource), "stream-type", 0, "format", GST_FORMAT_TIME, NULL);
    g_signal_connect (data->vsource, "need-data", G_CALLBACK (cb_need_video_data), data);
}
When I set the state to GST_STATE_PLAYING, it goes into cb_need_video_data once and gets ret != GST_FLOW_OK. I think the caps and the input data don't match, but I don't know why.
I'm just a newbie in GStreamer. Please help me correct the above code. Thanks all.
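One mismatch that may explain the non-OK flow return: BUFFER_SIZE is computed as 640 x 480 x 4 bytes per pixel, but the caps advertise "RGB", which in GStreamer is a packed 3-bytes-per-pixel format (640 x 480 x 3 = 921600). A sketch of making the two agree, here by declaring a 4-byte format so the existing BUFFER_SIZE stays valid (keeping "RGB" and shrinking the buffer to 640*480*3 would work just as well):

/* 640 x 480 x 4 bytes per pixel, matching the "RGBx" caps below */
#define BUFFER_SIZE (640 * 480 * 4)

GstCaps *caps = gst_caps_new_simple ("video/x-raw",
    "format", G_TYPE_STRING, "RGBx",  /* 4 bytes/pixel; plain "RGB" would need 3 */
    "width", G_TYPE_INT, 640,
    "height", G_TYPE_INT, 480,
    "framerate", GST_TYPE_FRACTION, 25, 1,
    NULL);
gst_app_src_set_caps (GST_APP_SRC (data->vsource), caps);
gst_caps_unref (caps);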

Getting error as libOpenSLES Leaving BufferQueue::Enqueue (SL_RESULT_BUFFER_INSUFFICIENT)

Hi, I am working on the Android JNI audio part. I call the JNI code below from Android 12 times, as per my logic. As soon as the 1st clip has played, I send a callback event to Android and call selectClip() again to play the next audio clip. I am able to play 2 clips, but on the third call the application throws the error libOpenSLES Leaving BufferQueue::Enqueue SL_RESULT_BUFFER_INSUFFICIENT.
Am I missing something here?
Any pointers on how to resolve this?
jboolean flag = JNI_TRUE;
jint clipNote = 0;

// select the desired clip and play count, and enqueue the first buffer if idle
JNIEXPORT jboolean JNICALL Java_com_example_nativeaudio_NativeAudio_selectClip(JNIEnv * env, jobject obj,
        jint count)
{
    // sleep logic
    while((clock() - t)/1000 < 2000) {
        usleep(10000); // sleep for 10ms
    }
    if(clipNote < 12) {
        if(flag == JNI_TRUE) {
            __android_log_print(ANDROID_LOG_DEBUG, "CustomTag", " flag = true : ClipNote : %d", clipNote);
            clipNote = clipNote + 1;
            nextBuffer = (short *) audio1;
            nextSize = sizeof(audio1);
            nextCount = count/2000;
            if (nextSize > 0) {
                // here we only enqueue one buffer because it is a long clip,
                // but for streaming playback we would typically enqueue at least 2 buffers to start
                SLresult result;
                result = (*bqPlayerBufferQueue)->Enqueue(bqPlayerBufferQueue, nextBuffer, nextSize);
                if (SL_RESULT_SUCCESS != result) {
                    return JNI_FALSE;
                }
            }
            // callback to android
            jclass cls = (*env)->GetObjectClass(env, obj);
            jmethodID mid = (*env)->GetMethodID(env, cls, "callBackStart", "(I)V");
            if (mid == 0) {
                return JNI_FALSE;
            }
            flag = JNI_FALSE;
            (*env)->CallVoidMethod(env, obj, mid, clipNote);
        } else {
            // callback to android
            __android_log_print(ANDROID_LOG_DEBUG, "CustomTag", " flag = false");
            jclass cls = (*env)->GetObjectClass(env, obj);
            jmethodID mid = (*env)->GetMethodID(env, cls, "callBackRelease", "(I)V");
            if (mid == 0) {
                return JNI_FALSE;
            }
            flag = JNI_TRUE;
            (*env)->CallVoidMethod(env, obj, mid, count);
        }
        t = clock();
    } else {
        SLresult result;
        // make sure the asset audio player was created
        if (NULL != fdPlayerPlay) {
            result = (*fdPlayerPlay)->SetPlayState(fdPlayerPlay, SL_PLAYSTATE_PAUSED);
            assert(SL_RESULT_SUCCESS == result);
            (void)result;
        }
    }
    return JNI_TRUE;
}
The SL_RESULT_BUFFER_INSUFFICIENT error was occurring due to a timing issue:
the second playback was started before the first had completed,
and the third was started while the first two were still in progress. That could not be handled, so the error was thrown.
Once the delay was increased enough for the first clip to complete, the issue was solved.
Set numbuffers
Chances are your numbuffers on your SLDataLocator_AndroidSimpleBufferQueue is inappropriately low.
SLDataLocator_AndroidSimpleBufferQueue in_loc;
in_loc.numBuffers = 5; // what number do you have here?
From the OpenSL ES spec:
If the maximum number of buffers specified in the SLDataLocator_BufferQueue structure used as the data source when creating the media object using the CreateAudioPlayer or CreateMidiPlayer method has been reached, the buffer is not added to the buffer queue and SL_RESULT_BUFFER_INSUFFICIENT is returned. At this point the client should wait until it receives a callback notification for a buffer completion at which time it can enqueue the buffer.
Alternatives
If changing your numbuffers to even a very high number doesn't work, ensure that you set your state to playing:
(*player)->SetPlayState( player, SL_PLAYSTATE_PLAYING ); // begin playback
You can call (*bqPlayerBufferQueue)->Clear(bqPlayerBufferQueue); when you stop the player; that will allow the previous playback to shut down without having to wait on a timer.
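For completeness, a rough sketch of where numBuffers lives and of driving the next Enqueue from the buffer-queue callback rather than a timing loop (nextBuffer/nextSize are the globals from the question; the rest is illustrative):

// Source locator used when creating the buffer-queue player: numBuffers is the queue depth.
SLDataLocator_AndroidSimpleBufferQueue loc_bufq;
loc_bufq.locatorType = SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE;
loc_bufq.numBuffers  = 2;   // 2+ lets one buffer play while another is queued

// Completion callback: runs each time a buffer finishes, i.e. when a queue slot is free again,
// so enqueueing here cannot hit SL_RESULT_BUFFER_INSUFFICIENT.
void bqPlayerCallback(SLAndroidSimpleBufferQueueItf bq, void *context)
{
    if (nextSize > 0) {
        (*bq)->Enqueue(bq, nextBuffer, nextSize);
    }
}

// Registered once after the player object is realized:
(*bqPlayerBufferQueue)->RegisterCallback(bqPlayerBufferQueue, bqPlayerCallback, NULL);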

How to properly pass an asset FileDescriptor to FFmpeg using JNI in Android

I'm trying to retrieve metadata in Android using FFmpeg, JNI and a Java FileDescriptor, and it isn't working. I know FFmpeg supports the pipe protocol, so I'm trying to emulate "cat test.mp3 | ffmpeg -i pipe:0" programmatically. I use the following code to get a FileDescriptor from an asset bundled with the Android application:
FileDescriptor fd = getContext().getAssets().openFd("test.mp3").getFileDescriptor();
setDataSource(fd, 0, 0x7ffffffffffffffL); // native function, shown below
Then, in my native code (in C++), I get the FileDescriptor by calling:
static void wseemann_media_FFmpegMediaMetadataRetriever_setDataSource(JNIEnv *env, jobject thiz, jobject fileDescriptor, jlong offset, jlong length)
{
    //...
    int fd = jniGetFDFromFileDescriptor(env, fileDescriptor); // function contents shown below
    //...
}

// function contents
static int jniGetFDFromFileDescriptor(JNIEnv * env, jobject fileDescriptor) {
    jint fd = -1;
    jclass fdClass = env->FindClass("java/io/FileDescriptor");
    if (fdClass != NULL) {
        jfieldID fdClassDescriptorFieldID = env->GetFieldID(fdClass, "descriptor", "I");
        if (fdClassDescriptorFieldID != NULL && fileDescriptor != NULL) {
            fd = env->GetIntField(fileDescriptor, fdClassDescriptorFieldID);
        }
    }
    return fd;
}
I then pass the file descriptor pipe # (In C) to FFmpeg:
char path[256] = "";
FILE *file = fdopen(fd, "rb");
if (file && (fseek(file, offset, SEEK_SET) == 0)) {
char str[20];
sprintf(str, "pipe:%d", fd);
strcat(path, str);
}
State *state = av_mallocz(sizeof(State));
state->pFormatCtx = NULL;
if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) { // Note: path is in the format "pipe:<the FD #>"
printf("Metadata could not be retrieved\n");
*ps = NULL;
return FAILURE;
}
if (avformat_find_stream_info(state->pFormatCtx, NULL) < 0) {
printf("Metadata could not be retrieved\n");
avformat_close_input(&state->pFormatCtx);
*ps = NULL;
return FAILURE;
}
// Find the first audio and video stream
for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
video_index = i;
}
if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
audio_index = i;
}
set_codec(state->pFormatCtx, i);
}
if (audio_index >= 0) {
stream_component_open(state, audio_index);
}
if (video_index >= 0) {
stream_component_open(state, video_index);
}
printf("Found metadata\n");
AVDictionaryEntry *tag = NULL;
while ((tag = av_dict_get(state->pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
printf("Key %s: \n", tag->key);
printf("Value %s: \n", tag->value);
}
*ps = state;
return SUCCESS;
My issue is that avformat_open_input doesn't fail, but it also doesn't let me retrieve any metadata or frames. The same code works if I use a regular file URI (e.g. file://sdcard/test.mp3) as the path. What am I doing wrong? Thanks in advance.
Note: if you would like to look at all of the code, I'm trying to solve this in order to provide this functionality for my library: FFmpegMediaMetadataRetriever.
Java
AssetFileDescriptor afd = getContext().getAssets().openFd("test.mp3");
setDataSource(afd.getFileDescriptor(), afd.getStartOffset(), afd.getLength());
C
void ***_setDataSource(JNIEnv *env, jobject thiz,
                       jobject fileDescriptor, jlong offset, jlong length)
{
    int fd = jniGetFDFromFileDescriptor(env, fileDescriptor);
    char path[20];
    sprintf(path, "pipe:%d", fd);

    State *state = av_mallocz(sizeof(State));
    state->pFormatCtx = avformat_alloc_context();
    state->pFormatCtx->skip_initial_bytes = offset;
    state->pFormatCtx->iformat = av_find_input_format("mp3");
and now we can continue as usual:
    if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) {
        printf("Metadata could not be retrieved\n");
        *ps = NULL;
        return FAILURE;
    }
...
Even better, use <android/asset_manager.h>, like this:
Java
setDataSource(getContext().getAssets(), "test.mp3");
C
#include <android/asset_manager_jni.h>

void ***_setDataSource(JNIEnv *env, jobject thiz,
                       jobject assetManager, jstring assetName)
{
    AAssetManager* mgr = AAssetManager_fromJava(env, assetManager); // renamed so it does not shadow the jobject parameter
    const char *szAssetName = (*env)->GetStringUTFChars(env, assetName, NULL);
    AAsset* asset = AAssetManager_open(mgr, szAssetName, AASSET_MODE_RANDOM);
    (*env)->ReleaseStringUTFChars(env, assetName, szAssetName);

    off_t offset, length;
    int fd = AAsset_openFileDescriptor(asset, &offset, &length);
    AAsset_close(asset);
Disclaimer: error checking was omitted for brevity, but resources are released correctly, except for fd. You must close(fd) when finished.
Post Scriptum: note that some media formats, e.g. mp4, need a seekable protocol, and pipe: cannot help there. In such cases, you may try sprintf(path, "/proc/self/fd/%d", fd);, or use the custom saf: protocol.
Thanks a lot for this post.
It helped me a lot to integrate Android 10 scoped storage with FFmpeg using a FileDescriptor.
Here is the solution I'm using on Android 10:
Java
Uri uri = ContentUris.withAppendedId(
        MediaStore.Audio.Media.EXTERNAL_CONTENT_URI,
        trackId // Coming from `MediaStore.Audio.Media._ID`
);
ParcelFileDescriptor parcelFileDescriptor = getContentResolver().openFileDescriptor(
        uri,
        "r"
);
int pid = android.os.Process.myPid();
String path = "/proc/" + pid + "/fd/" + parcelFileDescriptor.dup().getFd();
loadFFmpeg(path); // Call native code
CPP
// Native code, `path` coming from Java `loadFFmpeg(String)`
avformat_open_input(&format, path, nullptr, nullptr);
OK, I spent a lot of time trying to transfer media data to FFmpeg through an AssetFileDescriptor. Finally, I found that there may be a bug in mov.c: when mov.c parses the trak atom, the corresponding skip_initial_bytes is not applied. I have tried to fix this problem.
For details, please refer to FFmpegForAndroidAssetFileDescriptor; for a demo, refer to WhatTheCodec.
FileDescriptor fd = getContext().getAssets().openFd("test.mp3").getFileDescriptor();
I think you should start with AssetFileDescriptor.
http://developer.android.com/reference/android/content/res/AssetFileDescriptor.html
