Simple GStreamer encoder on Android

I am just trying to make a simple encoder demo with GStreamer 1.0. However, it does not work: it always returns an error when calling "push-buffer".
#define BUFFER_SIZE 1228800 // 640 x 480 x 4 bytes/pixel
static void cb_need_video_data (GstElement *appsrc,guint unused_size, CustomData* user_data)
{
static gboolean white = FALSE;
static GstClockTime timestamp = 0;
GstBuffer *buffer;
guint size = BUFFER_SIZE;
GstFlowReturn ret;
user_data->num_frame++;
if (user_data->num_frame >= TOTAL_FRAMES) {
/* we are EOS, send end-of-stream and remove the source */
g_signal_emit_by_name (user_data->source, "end-of-stream", &ret);
g_print ("start TOTAL_FRAMES");
return ;
}
buffer = gst_buffer_new_allocate (NULL, size, NULL);
gst_buffer_memset (buffer, 0, white ? 0xff : 0x0, size);
white = !white;
GST_BUFFER_PTS (buffer) = timestamp;
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 2);
timestamp += GST_BUFFER_DURATION (buffer);
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
if (ret != GST_FLOW_OK) {
/* something wrong, stop pushing */
GST_DEBUG("QUIT MAIN LOOP %d",ret);
g_main_loop_quit (user_data->main_loop);
}
}
void gst_set_pipeline(JNIEnv* env, jobject thiz,jstring uri){
CustomData *data = GET_CUSTOM_DATA (env, thiz, custom_data_field_id);
gchar *string = NULL;
string =
g_strdup_printf ("appsrc is-live=true name=testsource ! "
"queue ! videoconvert ! videoscale "
"add-borders=true ! videorate ! x264enc "
"! mpegtsmux "
"! filesink "
"location=/storage/emulated/0/test.avi sync=false append=true");
data->pipeline = gst_parse_launch (string, NULL);
g_free (string);
if (data->pipeline == NULL) {
g_print ("Bad sink\n");
return;
}
data->vsource = gst_bin_get_by_name (GST_BIN (data->pipeline), "testsource");
GstCaps *caps;
caps = gst_caps_new_simple ("video/x-raw",
"format", G_TYPE_STRING, "RGB",
"width", G_TYPE_INT, 640,
"height", G_TYPE_INT, 480,
"framerate", GST_TYPE_FRACTION, 25, 1,
NULL);
gst_app_src_set_caps(GST_APP_SRC(data->vsource), caps);
g_object_set (G_OBJECT(data->vsource),"stream-type", 0, "format", GST_FORMAT_TIME, NULL);
g_signal_connect (data->vsource, "need-data", G_CALLBACK (cb_need_video_data), data);
}
When I set the state to GST_STATE_PLAYING, it goes into cb_need_video_data one time and gets ret != GST_FLOW_OK. I think the caps and the input data don't match, but I don't know why.
I'm just a newbie with GStreamer. Please help me correct the code above. Thanks all.
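For reference, one concrete mismatch between the declared caps and the pushed data in the code above: the caps announce RGB at 640x480 and 25 fps, which means 640 x 480 x 3 = 921600 bytes per frame and 40 ms per frame, while the callback pushes 1228800-byte buffers (4 bytes per pixel) with a half-second duration. Below is a minimal sketch of a need-data callback sized and timed to match those caps; it reuses the CustomData type and appsrc names from the question and is illustrative only, not a tested fix.
/* Sketch only: buffer size and duration chosen to match the caps
 * "video/x-raw, format=RGB, width=640, height=480, framerate=25/1"
 * set in gst_set_pipeline above. */
#define FRAME_WIDTH  640
#define FRAME_HEIGHT 480
#define FRAME_SIZE   (FRAME_WIDTH * FRAME_HEIGHT * 3) /* RGB = 3 bytes per pixel */
static void cb_need_video_data (GstElement *appsrc, guint unused_size, CustomData *user_data)
{
static GstClockTime timestamp = 0;
GstBuffer *buffer;
GstFlowReturn ret;
buffer = gst_buffer_new_allocate (NULL, FRAME_SIZE, NULL);
gst_buffer_memset (buffer, 0, 0x00, FRAME_SIZE); /* solid black frame */
GST_BUFFER_PTS (buffer) = timestamp;
/* one frame lasts 1/25 of a second, matching framerate=25/1 */
GST_BUFFER_DURATION (buffer) = gst_util_uint64_scale_int (1, GST_SECOND, 25);
timestamp += GST_BUFFER_DURATION (buffer);
g_signal_emit_by_name (appsrc, "push-buffer", buffer, &ret);
gst_buffer_unref (buffer); /* the "push-buffer" action signal does not take ownership */
if (ret != GST_FLOW_OK)
g_main_loop_quit (user_data->main_loop);
}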

Related

Windows photo viewer does not open bitmap compressed images from Android studio

I am using an Android custom camera to capture JPG images, but I am not able to preview them in Windows Photo Viewer. Can anyone please advise? The images are visible in other applications like MS Paint, Office, and the Windows 10 Photos application.
This is obviously caused by an ICC color profile that Android adds to the bitmaps. Windows Photo Viewer isn't capable of displaying images with that ICC color profile.
I have not yet found out how this ICC profile is generated; I assume it is done by the Android Bitmap.compress function.
Removing the profile with e.g. ImageMagick will "repair" the files, and they will open in Photo Viewer too.
As of Android 10, Bitmap.compress() seems to have added an ICC profile.
This issue was caused by an incorrect version value in the added ICC profile.
This issue was reported and fixed on the skia issue tracker.
I don't know when it will be fixed on Android.
https://bugs.chromium.org/p/skia/issues/detail?id=12491
So, deleting the ICC profile from the jpeg image generated with Bitmap.compress() seems to be the best way at the moment.
To solve it programmatically, you need to parse the JPEG data generated with Bitmap.compress() to delete the ICC profile segment.
Update 2022-09-07: This issue is fixed in Android 13 (SKIA SkICC.cpp).
This is example code to delete the ICC Profile segment. Please refer to Exiv2's "The Metadata in JPEG files" for how the ICC Profile is embedded in a JPEG file.
Update 2022-10-28: Fixed incorrect APP2 segment copy
import java.io.EOFException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
public class JpegFixer {
private static final int MARKER = 0xff;
private static final int MARKER_SOI = 0xd8;
private static final int MARKER_SOS = 0xda;
private static final int MARKER_APP2 = 0xe2;
private static final int MARKER_EOI = 0xd9;
public void removeIccProfile(InputStream inputStream, String outputPath) throws IOException {
FileOutputStream outputStream = new FileOutputStream( outputPath );
if ( inputStream.read() != MARKER || inputStream.read() != MARKER_SOI ) {
throw new IOException( "Not a JPEG image" );
}
outputStream.write( MARKER );
outputStream.write( MARKER_SOI );
while ( true ) {
int marker0 = inputStream.read();
int marker1 = inputStream.read();
if ( marker0 != MARKER ) {
throw new IOException( "Invalid marker:" + Integer.toHexString( marker0 & 0xff ) );
}
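// The 2-byte segment length is big-endian and includes the length bytes themselves,
// so the payload that follows is segmentLength - 2.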
int length0 = inputStream.read();
int length1 = inputStream.read();
final int segmentLength = (length0 << 8 & 0xFF00) | (length1 & 0xFF);
final int length = segmentLength - 2;
if ( length < 0 ) {
throw new IOException( "Invalid length" );
}
if ( marker1 == MARKER_EOI || marker1 == MARKER_SOS ) {
outputStream.write( marker0 );
outputStream.write( marker1 );
outputStream.write( length0 );
outputStream.write( length1 );
copy( inputStream, outputStream );
break;
}
// ICC_PROFILE\0"
if ( marker1 == MARKER_APP2 && length >= 12 + 2 ) {
byte[] buffer = new byte[12];
read( inputStream, buffer );
if ( buffer[0] == 'I' && buffer[1] == 'C' &&
buffer[2] == 'C' && buffer[3] == '_' &&
buffer[4] == 'P' && buffer[5] == 'R' &&
buffer[6] == 'O' && buffer[7] == 'F' &&
buffer[8] == 'I' && buffer[9] == 'L' &&
buffer[10] == 'E' && buffer[11] == 0 ) {
// skip segment
inputStream.skip( length - buffer.length );
}
else {
// copy segment
outputStream.write( marker0 );
outputStream.write( marker1 );
outputStream.write( length0 );
outputStream.write( length1 );
outputStream.write( buffer );
copy( inputStream, outputStream, length - buffer.length );
}
}
else {
// copy segment
outputStream.write( marker0 );
outputStream.write( marker1 );
outputStream.write( length0 );
outputStream.write( length1 );
copy( inputStream, outputStream, length );
}
}
outputStream.close();
}
public void read(InputStream is, byte[] buffer) throws IOException {
int totalBytesRead = 0;
while ( totalBytesRead != buffer.length ) {
final int bytesRead = is.read( buffer, totalBytesRead, buffer.length - totalBytesRead );
if ( bytesRead == -1 ) {
throw new EOFException( "End of data reached." );
}
totalBytesRead += bytesRead;
}
}
private int copy(InputStream is, OutputStream out, int numBytes) throws IOException {
int remainder = numBytes;
byte[] buffer = new byte[8192];
while ( remainder > 0 ) {
int n = is.read( buffer, 0, Math.min( remainder, buffer.length ) );
if ( n == -1 ) {
break;
}
remainder -= n;
out.write( buffer, 0, n );
}
return numBytes;
}
private int copy(InputStream is, OutputStream os) throws IOException {
int total = 0;
byte[] buffer = new byte[8192];
int c;
while ( (c = is.read(buffer)) != -1 ) {
total += c;
os.write( buffer, 0, c );
}
return total;
}
}
Other answers to this question have already explained the issue, which may be caused by a bug in the image compression used by Android, preventing the image from being opened in Windows Photo Viewer.
I have reported this issue to Google and the Android community multiple times, but even after many Android upgrades this issue still persists.
I understand this problem is irritating, and in the absence of a direct solution I wish to share an easy and reliable workaround using open-source software.
For a single image, use GIMP to open the image and then export it (Ctrl+Shift+E) as-is, replacing the existing file.
For multiple images, use the Caesium image compressor with quality set to 100%.

Start Linphone MediaStreamer2 Audio & Video Stream Together

https://github.com/BelledonneCommunications/mediastreamer2
https://github.com/BelledonneCommunications/linphone-android
Using only the mediastreamer2 library, I am able to start an audio call with a given remote IP and port by calling the respective methods in audiostream.c.
I then needed to start a video call as well, so I initialised videostream.c,
used its respective methods, and provided it with surfaces to render the remote and local camera feeds. I am able to start a video stream successfully with a given remote port and IP.
The problem is when I start both streams together: the sound stops and the video streaming also stops; only the local camera feed works.
I have one method that does all of this for me. If I comment out the video part of it, the audio call works fine, and if I comment out the audio part, the video call works fine. But when I start both, there is no sound and no streaming,
even though we get "AudioStream started successfully" and "VideoStream started successfully" in the log.
Can someone with Linphone experience help us figure out the correct sequence of methods, or what we are doing wrong? Here is our method.
JNIEXPORT jint JNICALL Java_com_myapp_services_LinPhoneMSEngine_LinPhoneMSVE_1AudioStreamStartFull
(JNIEnv *env, jclass self, jstring remote_ip, jint remote_port, jint localport, jint payloadindex, jboolean isLowEnd)
{
int bRetVal = 0;
MSVideoSize size = {320, 240};
char rtcp_tool[128]={0};
int ret;
//jboolean copy;
char cname[128]={0};
const char *cremote_ip;
ortp_warning("Audio Stream Start Full");
LOGD("Audio Stream Start Full");
cremote_ip = (*env)->GetStringUTFChars(env, remote_ip, NULL);
ortp_warning("Cremote_ip= %s", cremote_ip);
LOGD("Cremote_ip= %s", cremote_ip);
// ms_filter_enable_statistics(TRUE);
veData->queue = ortp_ev_queue_new();
veData->soundCard = NULL;
set_playback_device();
ortp_warning("sound: playback_dev_id: %s", ms_snd_card_get_string_id(veData->soundCard));
LOGD("sound: playback_dev_id: %s", ms_snd_card_get_string_id(veData->soundCard));
veData->CaptureCard = NULL;
set_capture_device();
ortp_warning("sound: capture_dev_id: %s", ms_snd_card_get_string_id(veData->CaptureCard));
LOGD("sound: capture_dev_id: %s", ms_snd_card_get_string_id(veData->CaptureCard));
veData->audioStream = audio_stream_new(msFactory ,localport, localport + 1, false);
audio_stream_enable_adaptive_bitrate_control(veData->audioStream, true);
audio_stream_enable_adaptive_jittcomp(veData->audioStream, true);
rtp_session_set_jitter_compensation(veData->audioStream->ms.sessions.rtp_session, 50);
rtp_session_enable_rtcp_mux(veData->audioStream->ms.sessions.rtp_session, true);
ret=AUDIO_STREAM_FEATURE_VOL_SND | \
AUDIO_STREAM_FEATURE_VOL_RCV;
if (!isLowEnd)
{
ret = ret | AUDIO_STREAM_FEATURE_EC | AUDIO_STREAM_FEATURE_EQUALIZER | AUDIO_STREAM_FEATURE_DTMF | AUDIO_STREAM_FEATURE_DTMF_ECHO;
audio_stream_set_features(veData->audioStream, ret);
ortp_warning("Setting Echo Canceller params");
LOGD("Setting Echo Canceller params");
rtp_session_enable_jitter_buffer(veData->audioStream->ms.sessions.rtp_session, TRUE);
audio_stream_set_echo_canceller_params(veData->audioStream, 60, 0, 128);
audio_stream_enable_gain_control(veData->audioStream, true);
audio_stream_enable_automatic_gain_control(veData->audioStream, true);
}
else
{
audio_stream_set_features(veData->audioStream, ret);
ortp_warning("No Echo Canceller params!!");
LOGD("No Echo Canceller params!!");
rtp_session_enable_jitter_buffer(veData->audioStream->ms.sessions.rtp_session, FALSE);
}
if( veData->audioStream == NULL){
ortp_warning("AudioStream is Null");
LOGD("AudioStream is Null");
bRetVal = -1;
return -1;
}
audio_stream_play_received_dtmfs(veData->audioStream, true);
snprintf(rtcp_tool,sizeof(rtcp_tool)-1,"%s-%s","Android","2.8.0");
snprintf(cname,sizeof(cname)-1,"%s-%d", cremote_ip, remote_port);
ortp_warning("cname value: %s",cname);
LOGD("cname value: %s",cname);
audio_stream_prepare_sound(veData->audioStream, veData->soundCard, veData->CaptureCard);
if(0== audio_stream_start_full(veData->audioStream,veData->prof, cremote_ip, remote_port, cremote_ip, remote_port + 1, 114, 50,NULL,NULL,veData->soundCard,veData->CaptureCard, !isLowEnd))
{
veData->rtpSession = veData->audioStream->ms.sessions.rtp_session;
ortp_warning("AudioStreamStartFull Success");
post_audio_config(veData->audioStream);
audio_stream_set_rtcp_information(veData->audioStream, cname, rtcp_tool);
}
else
{
ortp_warning("AudioStream start failed");
bRetVal = -1;
}
// init video stream
veData->videoStream = video_stream_new(msFactory, localport,localport+1,false);
video_stream_enable_adaptive_bitrate_control(veData->videoStream, true);
video_stream_enable_adaptive_jittcomp(veData->videoStream, true);
rtp_session_enable_rtcp_mux(veData->videoStream->ms.sessions.rtp_session, true);
video_stream_use_video_preset(veData->videoStream, "custom");
video_stream_set_sent_video_size(veData->videoStream, size);
video_stream_set_preview_size(veData->videoStream, size);
video_stream_enable_self_view(veData->videoStream, TRUE);
ortp_message("Video Stream : [%p] & native window id : [%p]",veData->videoStream, veData->native_window_id);
video_stream_set_native_window_id(veData->videoStream, veData->native_window_id);
ortp_message("Video Stream : [%p] & preview window id : [%p]",veData->videoStream, veData->native_preview_window_id);
video_stream_set_native_preview_window_id(veData->videoStream, veData->native_preview_window_id);
video_stream_use_preview_video_window(veData->videoStream, TRUE);
video_stream_set_device_rotation(veData->videoStream, 0);
video_stream_set_fps(veData->videoStream, 10.0);
// link audio with video
audio_stream_link_video(veData->audioStream, veData->videoStream);
ms_message("Setting webcam as %p", veData->msWebCam);
if(bRetVal != -1 && video_stream_start(veData->videoStream, veData->prof,
cremote_ip,
remote_port,
cremote_ip,
remote_port + 1,
101,
60,
veData->msWebCam) >=0 ) {
ortp_warning("VideoStream started successfully");
veData->rtpSession = veData->videoStream->ms.sessions.rtp_session;
video_stream_set_rtcp_information(veData->videoStream, cname,rtcp_tool);
}
else
{
ortp_warning("VideoStream start failed");
bRetVal = -1;
}
(*env)->ReleaseStringUTFChars(env, remote_ip, cremote_ip);
return bRetVal;
}
Okay, finally, with the help of #belledonne-communications,
we figured out that we were sending both streams on the same port,
which is not possible. They need to be sent on different ports. We corrected it and it worked.
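For reference, here is a minimal sketch of the corrected port allocation, reusing the veData/msFactory fields and the stream calls from the method above. remote_audio_port and remote_video_port stand for whatever two distinct remote ports your signalling negotiates, and the local port choices are illustrative only.
/* Sketch only: audio and video each need their own RTP/RTCP port pair,
 * both locally and on the remote side. */
int audio_local_port = localport;      /* RTP; RTCP on audio_local_port + 1 */
int video_local_port = localport + 2;  /* RTP; RTCP on video_local_port + 1 */
veData->audioStream = audio_stream_new(msFactory, audio_local_port, audio_local_port + 1, false);
veData->videoStream = video_stream_new(msFactory, video_local_port, video_local_port + 1, false);
/* ...configure both streams exactly as in the question, then: */
audio_stream_start_full(veData->audioStream, veData->prof,
cremote_ip, remote_audio_port, cremote_ip, remote_audio_port + 1,
114, 50, NULL, NULL, veData->soundCard, veData->CaptureCard, !isLowEnd);
video_stream_start(veData->videoStream, veData->prof,
cremote_ip, remote_video_port, cremote_ip, remote_video_port + 1,
101, 60, veData->msWebCam);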

Receiving text data from a remote Android Bluetooth device on a Windows XP PC

I want to receive text data from a remote Android Bluetooth device. I had written a simple WinAPI server, but it doesn't respond when the remote Android device sends. The connection was fine and the devices paired and responded when using the Control Panel Bluetooth manager; sending and receiving through the Control Panel manager worked fine. This time, I want to test it using some C++ WinAPI code. Here's my code so far. It was adapted from a CodeProject article :)
#include <stdio.h>
#include <stdlib.h>
#include <sstream>
#include <string>
#include <tchar.h>
#include <WinSock2.h>
#include <ws2bth.h>
#include <BluetoothAPIs.h>
using namespace std;
int _tmain(int argc, _TCHAR* argv[])
{
WORD wVersionRequested = 0x202;
WSADATA m_data;
if (0 == WSAStartup(wVersionRequested, &m_data))
{
SOCKET s = socket(AF_BTH, SOCK_STREAM, BTHPROTO_RFCOMM);
const DWORD lastError = GetLastError();
if (s == INVALID_SOCKET)
{
ostringstream stream;
stream << lastError;
string data= stream.str();
printf("Failed to get bluetooth socket! %s\n", data.c_str());
return 1;
}
WSAPROTOCOL_INFO protocolInfo;
int protocolInfoSize = sizeof(protocolInfo);
if (0 != getsockopt(s, SOL_SOCKET, SO_PROTOCOL_INFO,
(char*)&protocolInfo, &protocolInfoSize))
{
return 1;
}
SOCKADDR_BTH address;
address.addressFamily = AF_BTH;
address.btAddr = 0;
address.serviceClassId = GUID_NULL;
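// BT_PORT_ANY lets the stack assign a free RFCOMM channel; the getsockname() call below reports which one was chosen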
address.port = BT_PORT_ANY;
sockaddr *pAddr = (sockaddr*)&address;
if (0 != bind(s, pAddr, sizeof(SOCKADDR_BTH)))
{
ostringstream stream;
stream << GetLastError();
string data= stream.str();
printf("%s\n", data.c_str() );
}
else
{
printf("\nBinding Successful....\n");
int length = sizeof(SOCKADDR_BTH) ;
getsockname(s,(sockaddr*)&address,&length);
wprintf (L"Local Bluetooth device is %04x%08x \nServer channel = %d\n",
GET_NAP(address.btAddr), GET_SAP(address.btAddr), address.port);
}
int size = sizeof(SOCKADDR_BTH);
if (0 != getsockname(s, pAddr, &size))
{
ostringstream stream;
stream << GetLastError();
string data= stream.str();
printf("%s\n", data.c_str());
}
if (0 != listen(s, 10))
{
ostringstream stream;
stream << GetLastError();
string data= stream.str();
printf("%s\n", data.c_str());
}
WSAQUERYSET service;
memset(&service, 0, sizeof(service));
service.dwSize = sizeof(service);
// service.lpszServiceInstanceName = reinterpret_cast<LPWSTR>(_T("Accelerometer Data..."));
//service.lpszServiceInstanceName = ;
// service.lpszComment = reinterpret_cast<LPWSTR>(_T("Pushing data to PC"));
GUID serviceID = OBEXFileTransferServiceClass_UUID;
service.lpServiceClassId = &serviceID;
service.dwNumberOfCsAddrs = 1;
service.dwNameSpace = NS_BTH;
CSADDR_INFO csAddr;
memset(&csAddr, 0, sizeof(csAddr));
csAddr.LocalAddr.iSockaddrLength = sizeof(SOCKADDR_BTH);
csAddr.LocalAddr.lpSockaddr = pAddr;
csAddr.iSocketType = SOCK_STREAM;
csAddr.iProtocol = BTHPROTO_RFCOMM;
service.lpcsaBuffer = &csAddr;
if (0 != WSASetService(&service, RNRSERVICE_REGISTER, 0))
{
printf("Service registration failed....");
ostringstream stream;
stream << GetLastError();
string data= stream.str();
printf("%d\n", data.c_str());
}
else
{
printf("\nService registration Successful....\n");
}
HANDLE hRadio ;
BLUETOOTH_FIND_RADIO_PARAMS btfrp = { sizeof(BLUETOOTH_FIND_RADIO_PARAMS) };
HBLUETOOTH_RADIO_FIND hFind = BluetoothFindFirstRadio(&btfrp, &hRadio);
if ((hFind != NULL)&& (hRadio !=NULL))
{
if (BluetoothEnableDiscovery(hRadio,TRUE)){
printf("BluetoothEnableDiscovery() is working!\n");
}
if (BluetoothEnableIncomingConnections(hRadio,TRUE)){
printf("BluetoothEnableIncomingConnections() is working!\n");
}
if (BluetoothIsConnectable(hRadio)){
printf("BluetoothIsConnectable() is working!\n");
}
}
printf("\nBefore accept.........");
SOCKADDR_BTH sab2;
int ilen = sizeof(sab2);
SOCKET s2 = accept (s,(sockaddr*)&sab2, &ilen);
if (s2 == INVALID_SOCKET)
{
wprintf (L"Socket bind, error %d\n", WSAGetLastError ());
}
wprintf (L"\nConnection came from %04x%08x to channel %d\n",
GET_NAP(sab2.btAddr), GET_SAP(sab2.btAddr), sab2.port);
wprintf (L"\nAfter Accept\n");
char buffer[1024] = {0};
memset(buffer, 0, sizeof(buffer));
int r = recv(s2,(char*)buffer, sizeof(buffer), 0);
printf("%s\n",buffer);
closesocket(s2);
if (0 != WSASetService(&service, RNRSERVICE_DELETE, 0))
{
ostringstream stream;
stream << GetLastError();
string data= stream.str();
printf("%s\n", data.c_str());
}
closesocket(s);
WSACleanup();
return 0;
}
}
I use a Microsoft BT device, btw.
Just to be sure: you said you're on WinXP... OK, but is it WinXP SP3 with the Microsoft BT stack and an appropriate dongle, or another XP version with another BT stack (like Widcomm or BlueSoleil)?
In the latter case, the BT socket API won't work...
Otherwise, there's not much to say about the code, except for a missing BluetoothFindRadioClose...
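For completeness, a small sketch of that missing cleanup, using the hFind and hRadio handles that BluetoothFindFirstRadio already returns in the code above:
// Sketch: release the radio enumeration handle and the radio handle once done.
if (hFind != NULL)
{
BluetoothFindRadioClose(hFind);
}
if (hRadio != NULL)
{
CloseHandle(hRadio);
}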

How to properly pass an asset FileDescriptor to FFmpeg using JNI in Android

I'm trying to retrieve metadata in Android using FFmpeg, JNI and a Java FileDescriptor, and it isn't working. I know FFmpeg supports the pipe protocol, so I'm trying to emulate "cat test.mp3 | ffmpeg -i pipe:0" programmatically. I use the following code to get a FileDescriptor from an asset bundled with the Android application:
FileDescriptor fd = getContext().getAssets().openFd("test.mp3").getFileDescriptor();
setDataSource(fd, 0, 0x7ffffffffffffffL); // native function, shown below
Then, in my native (C++) code, I get the FileDescriptor by calling:
static void wseemann_media_FFmpegMediaMetadataRetriever_setDataSource(JNIEnv *env, jobject thiz, jobject fileDescriptor, jlong offset, jlong length)
{
//...
int fd = jniGetFDFromFileDescriptor(env, fileDescriptor); // function contents show below
//...
}
// function contents
static int jniGetFDFromFileDescriptor(JNIEnv * env, jobject fileDescriptor) {
jint fd = -1;
jclass fdClass = env->FindClass("java/io/FileDescriptor");
if (fdClass != NULL) {
jfieldID fdClassDescriptorFieldID = env->GetFieldID(fdClass, "descriptor", "I");
if (fdClassDescriptorFieldID != NULL && fileDescriptor != NULL) {
fd = env->GetIntField(fileDescriptor, fdClassDescriptorFieldID);
}
}
return fd;
}
I then pass the file descriptor pipe number (in C) to FFmpeg:
char path[256] = "";
FILE *file = fdopen(fd, "rb");
if (file && (fseek(file, offset, SEEK_SET) == 0)) {
char str[20];
sprintf(str, "pipe:%d", fd);
strcat(path, str);
}
State *state = av_mallocz(sizeof(State));
state->pFormatCtx = NULL;
if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) { // Note: path is in the format "pipe:<the FD #>"
printf("Metadata could not be retrieved\n");
*ps = NULL;
return FAILURE;
}
if (avformat_find_stream_info(state->pFormatCtx, NULL) < 0) {
printf("Metadata could not be retrieved\n");
avformat_close_input(&state->pFormatCtx);
*ps = NULL;
return FAILURE;
}
// Find the first audio and video stream
for (i = 0; i < state->pFormatCtx->nb_streams; i++) {
if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && video_index < 0) {
video_index = i;
}
if (state->pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_index < 0) {
audio_index = i;
}
set_codec(state->pFormatCtx, i);
}
if (audio_index >= 0) {
stream_component_open(state, audio_index);
}
if (video_index >= 0) {
stream_component_open(state, video_index);
}
printf("Found metadata\n");
AVDictionaryEntry *tag = NULL;
while ((tag = av_dict_get(state->pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
printf("Key %s: \n", tag->key);
printf("Value %s: \n", tag->value);
}
*ps = state;
return SUCCESS;
My issue is that avformat_open_input doesn't fail, but it also doesn't let me retrieve any metadata or frames. The same code works if I use a regular file URI (e.g. file://sdcard/test.mp3) as the path. What am I doing wrong? Thanks in advance.
Note: if you would like to look at all of the code, I'm trying to solve this issue in order to provide this functionality for my library: FFmpegMediaMetadataRetriever.
Java
AssetFileDescriptor afd = getContext().getAssets().openFd("test.mp3");
setDataSource(afd.getFileDescriptor(), afd.getStartOffset(), afd.getLength());
C
void ***_setDataSource(JNIEnv *env, jobject thiz,
jobject fileDescriptor, jlong offset, jlong length)
{
int fd = jniGetFDFromFileDescriptor(env, fileDescriptor);
char path[20];
sprintf(path, "pipe:%d", fd);
State *state = av_mallocz(sizeof(State));
state->pFormatCtx = avformat_alloc_context();
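// the fd refers to the APK (or file) that contains the asset, and `offset` is where the asset's bytes begin inside it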
state->pFormatCtx->skip_initial_bytes = offset;
state->pFormatCtx->iformat = av_find_input_format("mp3");
and now we can continue as usual:
if (avformat_open_input(&state->pFormatCtx, path, NULL, &options) != 0) {
printf("Metadata could not be retrieved\n");
*ps = NULL;
return FAILURE;
}
...
Even better, use <android/asset_manager.h>, like this:
Java
setDataSource(getContext().getAssets(), "test.mp3");
C
#include <android/asset_manager_jni.h>
void ***_setDataSource(JNIEnv *env, jobject thiz,
jobject assetManager, jstring assetName)
{
AAssetManager* mgr = AAssetManager_fromJava(env, assetManager);
const char *szAssetName = (*env)->GetStringUTFChars(env, assetName, NULL);
AAsset* asset = AAssetManager_open(mgr, szAssetName, AASSET_MODE_RANDOM);
(*env)->ReleaseStringUTFChars(env, assetName, szAssetName);
off_t offset, length;
int fd = AAsset_openFileDescriptor(asset, &offset, &length);
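// AAsset_openFileDescriptor() returns a dup'ed fd (for the file that contains the asset);
// offset/length give the asset's byte range within it, so the AAsset can be closed right away.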
AAsset_close(asset);
Disclaimer: error checking was omitted for brevity, but resources are released correctly, except for fd. You must close(fd) when finished.
Post scriptum: note that some media formats, e.g. mp4, need a seekable protocol, and pipe: cannot help. In such cases, you may try sprintf(path, "/proc/self/fd/%d", fd);, or use the custom saf: protocol.
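A hedged sketch of that fallback, reusing the fd obtained above (from AAsset_openFileDescriptor or the dup'ed Java FileDescriptor) and the State struct from the earlier snippets:
/* Sketch only: mp4 and similar formats need a seekable input, so point FFmpeg
 * at the process's own fd entry in procfs instead of "pipe:<fd>". */
char path[64];
snprintf(path, sizeof(path), "/proc/self/fd/%d", fd);
if (avformat_open_input(&state->pFormatCtx, path, NULL, NULL) != 0) {
printf("Metadata could not be retrieved\n");
return FAILURE;
}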
Thanks a lot for this post.
It helped me a lot to integrate Android 10 and scoped storage with FFmpeg using a FileDescriptor.
Here is the solution I'm using on Android 10:
Java
URI uri = ContentUris.withAppendedId(
MediaStore.Audio.Media.EXTERNAL_CONTENT_URI,
trackId // Coming from `MediaStore.Audio.Media._ID`
);
ParcelFileDescriptor parcelFileDescriptor = getContentResolver().openFileDescriptor(
uri,
"r"
);
int pid = android.os.Process.myPid();
String path = "/proc/" + pid + "/fd/" + parcelFileDescriptor.dup().getFd();
loadFFmpeg(path); // Call native code
CPP
// Native code, `path` coming from Java `loadFFmpeg(String)`
avformat_open_input(&format, path, nullptr, nullptr);
OK, I spent a lot of time trying to transfer media data to FFmpeg through an AssetFileDescriptor. Finally, I found that there may be a bug in mov.c: when mov.c parses the trak atom, the corresponding skip_initial_bytes is not taken into account. I have tried to fix this problem.
Detail please refer to FFmpegForAndroidAssetFileDescriptor, demo refer to WhatTheCodec.
FileDescriptor fd = getContext().getAssets().openFd("test.mp3").getFileDescriptor();
I think you should start with AssetFileDescriptor.
http://developer.android.com/reference/android/content/res/AssetFileDescriptor.html

Create video from images

Is there a way to create a video from a series of images on Android? Maybe a way to extend MediaRecorder so it can take images as input?
I want to actually create the video and store it (as an MPEG-4 file, for instance).
Thanks for any suggestions.
I'm also trying to do the same thing. I have been advised to use Libav.
http://libav.org/
However, I need to build it with the NDK and I currently have some issues doing so.
I'm looking for some documentation about it. I'll keep you posted.
I've created a post about it: Libav build for Android
You can use AnimationDrawable in an ImageView.
Add frames using the AnimationDrawable.addFrame(Drawable frame, int duration) method, and start the animation using AnimationDrawable.start().
Not sure if that's ideal, but it would work.
I use Android + NDK
AVFrame* OpenImage(const char* imageFileName)
{
AVFormatContext *pFormatCtx = avformat_alloc_context();
std::cout<<"1"<<imageFileName<<std::endl;
if( avformat_open_input(&pFormatCtx, imageFileName, NULL, NULL) < 0)
{
printf("Can't open image file '%s'\n", imageFileName);
return NULL;
}
std::cout<<"2"<<std::endl;
av_dump_format(pFormatCtx, 0, imageFileName, false);
AVCodecContext *pCodecCtx;
std::cout<<"3"<<std::endl;
pCodecCtx = pFormatCtx->streams[0]->codec;
pCodecCtx->width = W_VIDEO;
pCodecCtx->height = H_VIDEO;
//pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
// Find the decoder for the video stream
AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
if (!pCodec)
{
printf("Codec not found\n");
return NULL;
}
// Open codec
//if(avcodec_open2(pCodecCtx, pCodec)<0)
if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)//check this NULL, it should be of AVDictionary **options
{
printf("Could not open codec\n");
return NULL;
}
std::cout<<"4"<<std::endl;
//
AVFrame *pFrame;
pFrame = av_frame_alloc();
if (!pFrame)
{
printf("Can't allocate memory for AVFrame\n");
return NULL;
}
printf("here");
int frameFinished;
int numBytes;
// Determine required buffer size and allocate buffer
numBytes = avpicture_get_size( pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
uint8_t *buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));
avpicture_fill((AVPicture *) pFrame, buffer, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
// Read frame
AVPacket packet;
int framesNumber = 0;
while (av_read_frame(pFormatCtx, &packet) >= 0)
{
if(packet.stream_index != 0)
continue;
int ret = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
if (ret > 0)
{
printf("Frame is decoded, size %d\n", ret);
pFrame->quality = 4;
return pFrame;
}
else
printf("Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));
}
return NULL; // no frame could be decoded
}
int combine_images_to_video(const char * infile_dir, const char * infile_prefix, const char* infile_surname, int total_frames,const char *outfile)
{
if (total_frames <= 0){
std::cout << "Usage: cv2ff <dir_name> <prefix> <image surname> <total frames> <outfile>" << std::endl;
std::cout << "Please check that the 4th argument is integer value of total frames"<<std::endl;
return 1;
}
printf("max %d frames\n",total_frames);
char *imageFileName;
char numberChar[NUMNUMBER];
// initialize FFmpeg library
av_register_all();
// av_log_set_level(AV_LOG_DEBUG);
int ret;
const int dst_width = W_VIDEO;
const int dst_height = H_VIDEO;
const AVRational dst_fps = {30, 1};//{fps,1}
// open output format context
AVFormatContext* outctx = nullptr;
ret = avformat_alloc_output_context2(&outctx, nullptr, nullptr, outfile);
//outctx->video_codec->
if (ret < 0) {
std::cerr << "fail to avformat_alloc_output_context2(" << outfile << "): ret=" << ret;
return 2;
}
// open output IO context
ret = avio_open2(&outctx->pb, outfile, AVIO_FLAG_WRITE, nullptr, nullptr);
if (ret < 0) {
std::cerr << "fail to avio_open2: ret=" << ret;
return 2;
}
// create new video stream
AVCodec* vcodec = avcodec_find_encoder(outctx->oformat->video_codec);
AVStream* vstrm = avformat_new_stream(outctx, vcodec);
if (!vstrm) {
std::cerr << "fail to avformat_new_stream";
return 2;
}
avcodec_get_context_defaults3(vstrm->codec, vcodec);
vstrm->codec->width = dst_width;
vstrm->codec->height = dst_height;
vstrm->codec->pix_fmt = vcodec->pix_fmts[0];
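// stream/encoder time_base = 1/fps, so one pts tick equals exactly one frame (see frame_pts below)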
vstrm->codec->time_base = vstrm->time_base = av_inv_q(dst_fps);
vstrm->r_frame_rate = vstrm->avg_frame_rate = dst_fps;
if (outctx->oformat->flags & AVFMT_GLOBALHEADER)
vstrm->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
// open video encoder
ret = avcodec_open2(vstrm->codec, vcodec, nullptr);
if (ret < 0) {
std::cerr << "fail to avcodec_open2: ret=" << ret;
return 2;
}
std::cout
<< "outfile: " << outfile << "\n"
<< "format: " << outctx->oformat->name << "\n"
<< "vcodec: " << vcodec->name << "\n"
<< "size: " << dst_width << 'x' << dst_height << "\n"
<< "fps: " << av_q2d(dst_fps) << "\n"
<< "pixfmt: " << av_get_pix_fmt_name(vstrm->codec->pix_fmt) << "\n"
<< std::flush;
// initialize sample scaler
SwsContext* swsctx = sws_getCachedContext(
nullptr, dst_width, dst_height, AV_PIX_FMT_BGR24,
dst_width, dst_height, vstrm->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr);
if (!swsctx) {
std::cerr << "fail to sws_getCachedContext";
return 2;
}
// allocate frame buffer for encoding
AVFrame* frame = av_frame_alloc();
std::vector<uint8_t> framebuf(avpicture_get_size(vstrm->codec->pix_fmt, dst_width, dst_height));
avpicture_fill(reinterpret_cast<AVPicture*>(frame), framebuf.data(), vstrm->codec->pix_fmt, dst_width, dst_height);
frame->width = dst_width;
frame->height = dst_height;
frame->format = static_cast<int>(vstrm->codec->pix_fmt);
// encoding loop
avformat_write_header(outctx, nullptr);
int64_t frame_pts = 0;
unsigned nb_frames = 0;
bool end_of_stream = false;
int got_pkt = 0;
int i =0;
imageFileName = (char *)malloc(strlen(infile_dir)+strlen(infile_prefix)+NUMNUMBER+strlen(infile_surname)+1);
do{
if(!end_of_stream){
strcpy(imageFileName,infile_dir);
//strcat(imageFileName,"/");
strcat(imageFileName,infile_prefix);
sprintf(numberChar,"%03d",i+1);
strcat(imageFileName,numberChar);
//strcat(imageFileName,".");
strcat(imageFileName,infile_surname);
__android_log_print(1, "RecordingImage", "%s", imageFileName);
std::cout<<imageFileName<<std::endl;
AVFrame* frame_from_file = OpenImage(imageFileName);
if(!frame_from_file){
std::cout<<"error OpenImage"<<std::endl;
return 5;
}
sws_scale(swsctx, frame_from_file->data, frame_from_file->linesize, 0, frame_from_file->height, frame->data, frame->linesize);
frame->pts = frame_pts++;
av_frame_free(&frame_from_file);
}
// encode video frame
AVPacket pkt;
pkt.data = nullptr;
pkt.size = 0;
av_init_packet(&pkt);
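// when end_of_stream is true, the NULL frame below flushes the encoder's remaining packets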
ret = avcodec_encode_video2(vstrm->codec, &pkt, end_of_stream ? nullptr : frame, &got_pkt);
if (ret < 0) {
std::cerr << "fail to avcodec_encode_video2: ret=" << ret << "\n";
return 2;
}
// rescale packet timestamp
pkt.duration = 1;
av_packet_rescale_ts(&pkt, vstrm->codec->time_base, vstrm->time_base);
// write packet
av_write_frame(outctx, &pkt);
std::cout << nb_frames << '\r' << std::flush; // dump progress
++nb_frames;
av_free_packet(&pkt);
i++;
if(i==total_frames-1)
end_of_stream = true;
} while (i<total_frames);
av_write_trailer(outctx);
std::cout << nb_frames << " frames encoded" << std::endl;
av_frame_free(&frame);
avcodec_close(vstrm->codec);
avio_close(outctx->pb);
avformat_free_context(outctx);
free(imageFileName);
return 0;
}
We can create a video from images using ffmpeg.
Check out my post for using ffmpeg in Android.
Use the command below to create a video from images placed in the same folder:
String command[]={"-y", "-r","1/5" ,"-i",src.getAbsolutePath(),
"-c:v","libx264","-vf", "fps=25","-pix_fmt","yuv420p", dest.getAbsolutePath()};
Here, src.getAbsolutePath() is the absolute path pattern of your input images.
For example, if all your images are stored in an Images folder inside the Pictures directory, with names
extract_picture001.jpg, extract_picture002.jpg, extract_picture003.jpg, ...
then:
String filePrefix = "extract_picture";
String fileExtn = ".jpg";
File picDir = Environment.getExternalStoragePublicDirectory(
Environment.DIRECTORY_PICTURES);
File dir = new File(picDir, "Images");
File src = new File(dir, filePrefix + "%03d" + fileExtn);
To create a video from images placed in different folders, you have to create a text file, add the image paths to it, and then specify the path of that text file as an input option.
Example:
Text File
file '/storage/emulated/0/DCIM/Camera/P_20170807_143916.jpg'
duration 2
file '/storage/emulated/0/DCIM/Pic/P_20170305_142948.jpg'
duration 5
file '/storage/emulated/0/DCIM/Camera/P_20170305_142939.jpg'
duration 6
file '/storage/emulated/0/DCIM/Pic/P_20170305_142818.jpg'
duration 2
Command
String command[] = {"-y", "-f", "concat", "-safe", "0", "-i", textFile.getAbsolutePath(), "-vsync", "vfr", "-pix_fmt", "yuv420p", dest.getAbsolutePath()};
where textFile.getAbsolutePath() is the absolute path of your text file
Check out this ffmpeg doc for more info
