I am encoding raw data on Android using ffmpeg libraries. The native code reads the audio data from an external device and encodes it into AAC format in an mp4 container. I am finding that the audio data is successfully encoded (I can play it with Groove Music, my default Windows audio player). But the metadata, as reported by ffprobe, has an incorrect duration of 0.05 secs - it's actually several seconds long. Also the bitrate is reported wrongly as around 65kbps even though I specified 192kbps.
I've tried recordings of various durations but the result is always similar - the same (very small) duration and bitrate. I've also tried various other audio players, such as QuickTime, but they play only the first 0.05 secs or so of the audio.
I've removed error-checking from the following. The actual code checks every call and no problems are reported.
Initialisation:
void AudioWriter::initialise( const char *filePath )
{
AVCodecID avCodecID = AVCodecID::AV_CODEC_ID_AAC;
int bitRate = 192000;
const char *containerFormat = "mp4";
int sampleRate = 48000;
int nChannels = 2;
mAvCodec = avcodec_find_encoder(avCodecID);
mAvCodecContext = avcodec_alloc_context3(mAvCodec);
mAvCodecContext->codec_id = avCodecID;
mAvCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
mAvCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
mAvCodecContext->bit_rate = bitRate;
mAvCodecContext->sample_rate = sampleRate;
mAvCodecContext->channels = nChannels;
mAvCodecContext->channel_layout = AV_CH_LAYOUT_STEREO;
avcodec_open2( mAvCodecContext, mAvCodec, nullptr );
mAvFormatContext = avformat_alloc_context();
avformat_alloc_output_context2(&mAvFormatContext, nullptr, containerFormat, nullptr);
mAvFormatContext->audio_codec = mAvCodec;
mAvFormatContext->audio_codec_id = avCodecID;
mAvOutputStream = avformat_new_stream(mAvFormatContext, mAvCodec);
avcodec_parameters_from_context(mAvOutputStream->codecpar, mAvCodecContext);
if (!(mAvFormatContext->oformat->flags & AVFMT_NOFILE))
{
avio_open(&mAvFormatContext->pb, filePath, AVIO_FLAG_WRITE);
}
if ( mAvFormatContext->oformat->flags & AVFMT_GLOBALHEADER )
{
mAvCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avformat_write_header(mAvFormatContext, NULL);
mAvAudioFrame = av_frame_alloc();
mAvAudioFrame->nb_samples = mAvCodecContext->frame_size;
mAvAudioFrame->format = mAvCodecContext->sample_fmt;
mAvAudioFrame->channel_layout = mAvCodecContext->channel_layout;
av_samples_get_buffer_size(NULL, mAvCodecContext->channels, mAvCodecContext->frame_size,
mAvCodecContext->sample_fmt, 0);
av_frame_get_buffer(mAvAudioFrame, 0);
av_frame_make_writable(mAvAudioFrame);
mAvPacket = av_packet_alloc();
}
Encoding:
// SoundRecording is a custom class with the raw samples to be encoded
bool AudioWriter::encodeToContainer( SoundRecording *soundRecording )
{
int ret;
int frameCount = mAvCodecContext->frame_size;
int nChannels = mAvCodecContext->channels;
float *buf = new float[frameCount*nChannels];
while ( soundRecording->hasReadableData() )
{
//Populate the frame
int samplesRead = soundRecording->read( buf, frameCount*nChannels );
// Planar data: de-interleave into the frame's per-channel buffers
float **samples = (float **) mAvAudioFrame->data; // definition assumed; not shown in the original post
int nFrames = samplesRead/nChannels;
for ( int i = 0; i < nFrames; ++i )
{
for (int c = 0; c < nChannels; ++c )
{
samples[c][i] = buf[nChannels*i +c];
}
}
// Fill a gap at the end with silence
if ( samplesRead < frameCount*nChannels )
{
for ( int i = samplesRead; i < frameCount*nChannels; ++i )
{
for (int c = 0; c < nChannels; ++c )
{
samples[c][i] = 0.0;
}
}
}
encodeFrame( mAvAudioFrame );
}
finish();
}
bool AudioWriter::encodeFrame( AVFrame *frame )
{
//send the frame for encoding
int ret;
if ( frame != nullptr )
{
frame->pts = mAudFrameCounter++;
}
ret = avcodec_send_frame(mAvCodecContext, frame );
while (ret >= 0)
{
ret = avcodec_receive_packet(mAvCodecContext, mAvPacket);
if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
{
break;
}
else
if (ret < 0) {
return false;
}
av_packet_rescale_ts(mAvPacket, mAvCodecContext->time_base, mAvOutputStream->time_base);
mAvPacket->stream_index = mAvOutputStream->index;
av_interleaved_write_frame(mAvFormatContext, mAvPacket);
av_packet_unref(mAvPacket);
}
return true;
}
void AudioWriter::finish()
{
// Flush by sending a null frame
encodeFrame( nullptr );
av_write_trailer(mAvFormatContext);
}
Since the resultant file contains the recorded music, the code to manipulate the audio data seems to be correct (unless I am overwriting other memory somehow).
The inaccurate duration and bitrate suggest that information concerning time is not being properly managed. I set the pts of the frames using a simple increasing integer. I'm unclear what the code that sets the timestamp and stream index achieves - and whether it's even necessary: I copied it from supposedly working code but I've seen other code without it.
Can anyone see what I'm doing wrong?
The timestamps need to be correct. Set the time_base to 1/sample_rate and increment the timestamp by 1024 each frame. Note: 1024 is AAC-specific; if you change codecs, you need to change the frame size.
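As a rough sketch against the member names used in the question (everything else left as posted), the fix could look like this:
// In initialise(): give the encoder a 1/sample_rate time base, and request the
// same for the output stream (the muxer may still pick its own, which is what
// the existing av_packet_rescale_ts call takes care of):
mAvCodecContext->time_base = AVRational{ 1, sampleRate };
// ... after avformat_new_stream():
mAvOutputStream->time_base = AVRational{ 1, sampleRate };

// In encodeFrame(): advance the pts by the samples-per-frame (frame_size,
// i.e. 1024 for AAC) instead of by 1:
if ( frame != nullptr )
{
    frame->pts = mAudFrameCounter;
    mAudFrameCounter += mAvCodecContext->frame_size;
}
The container's duration and bit rate are derived from these timestamps, which is why they come out tiny when the pts only advances by 1 per frame.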
I ran into a problem, as described below.
I use the Android framework's MediaCodec API to encode the camera preview flow from an OpenGL texture, and I wish to generate a .ts file.
Since MediaCodec does not support generating a .ts file, I use ffmpeg to do so.
Everything is OK - the .ts file is successfully generated and it can be played by a media player on both my Android phone and my PC - but there are still 2 problems bothering me:
the video file does not have a cover (thumbnail) image, whether the xxx.ts is viewed on my Android phone or on my PC;
when I move the xxx.ts onto my PC (Windows) and right-click the file to check its attributes, the frame-rate attribute is also empty;
Does anyone have ideas on these issues?
The MediaCodec encoder configuration is as below:
mBufferInfo = new MediaCodec.BufferInfo();
MediaFormat format = MediaFormat.createVideoFormat(MIME_TYPE, width, height);
format.setInteger(MediaFormat.KEY_COLOR_FORMAT,
MediaCodecInfo.CodecCapabilities.COLOR_FormatSurface);
format.setInteger(MediaFormat.KEY_BIT_RATE, 4 * 1024 * 1024);
format.setInteger(MediaFormat.KEY_FRAME_RATE, 25);
format.setInteger(MediaFormat.KEY_I_FRAME_INTERVAL, 1);
mEncoder = MediaCodec.createEncoderByType("video/avc");
mEncoder.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
mInputSurface = mEncoder.createInputSurface();
mEncoder.start();
mFFmpegMuxer = new FFmpegMuxer();
mFFmpegMuxer.prepare();
FFmpegMuxer::prepare(), i.e. the ffmpeg muxer's configuration, is as below:
mOutputFormatCtx = avformat_alloc_context();
AVOutputFormat * outputFormat = av_guess_format(nullptr, "xxx.ts", nullptr);
mOutputFormatCtx->oformat = outputFormat;
AVStream *stream = avformat_new_stream(mOutputFormatCtx, nullptr);
stream->codecpar->codec_id = AV_CODEC_ID_H264;
stream->codecpar->format = AV_PIX_FMT_RGBA;
stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
stream->codecpar->codec_tag = av_codec_get_tag(mOutputFormatCtx->oformat->codec_tag,
AV_CODEC_ID_H264);
stream->codecpar->width = 1080;
stream->codecpar->height = 1200;
stream->codecpar->bit_rate = 4 * 1024 * 1024;
stream->time_base.num = 1;
stream->time_base.den = 25;
mOutputStreamInd = stream->index;
if (mOutputFormatCtx->oformat->flags & AVFMT_GLOBALHEADER) {
mOutputFormatCtx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
}
avio_open2(&mOutputFormatCtx->pb, outputPath.c_str(), AVIO_FLAG_WRITE, nullptr, nullptr);
AVDictionary *opts = nullptr;
av_dict_set(&opts, "movflags", "faststart", 0);
avformat_write_header(mOutputFormatCtx, &opts);
av_dict_free(&opts);
Every time MediaCodec successfully encodes a packet, the packet is enqueued into ffmpeg:
void FFmpegMuxer::enqueueBuffer(uint8_t *data, int offset, int size, long pts, bool keyFrame) {
if (mPacket == nullptr) { mPacket = av_packet_alloc(); }
av_init_packet(mPacket);
mPacket->stream_index = mOutputStreamInd;
mPacket->size = size;
mPacket->data = data + offset;
if (mRecStartPts == 0) {
mRecStartPts = pts;
mPacket->pts = 0;
mPacket->dts = 0;
} else {
int64_t dstPts = pts - mRecStartPts;
dstPts = av_rescale_q(dstPts, AV_TIME_BASE_Q,
mOutputFormatCtx->streams[mOutputStreamInd]->time_base);
mPacket->pts = dstPts;
mPacket->dts = dstPts;
}
if (keyFrame) {
mPacket->flags = AV_PKT_FLAG_KEY;
}
int status = av_interleaved_write_frame(mOutputFormatCtx, mPacket);
if (status < 0) {
..........
}
av_packet_unref(mPacket);
}
When the recording needs to be stopped, the code is as below:
av_write_trailer(mOutputFormatCtx);
These code snippets are all the information I can supply - can anyone find out what is wrong?
Finally I found out what was wrong:
a TS file is a real-time bit stream, so I need to insert the SPS and PPS before every I-frame;
that is the answer.
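To make that concrete, a rough sketch of how FFmpegMuxer::enqueueBuffer could do it (mSpsPps / mSpsPpsSize are assumed members, not in the code above, that cache the BUFFER_FLAG_CODEC_CONFIG buffer MediaCodec emits before the first real frame):
// Sketch only: prepend the cached SPS/PPS NAL units to every keyframe before muxing.
if (keyFrame && mSpsPpsSize > 0) {
    uint8_t *annexb = (uint8_t *) av_malloc(mSpsPpsSize + size);
    memcpy(annexb, mSpsPps, mSpsPpsSize);               // SPS + PPS
    memcpy(annexb + mSpsPpsSize, data + offset, size);  // the I-frame itself
    mPacket->data = annexb;
    mPacket->size = mSpsPpsSize + size;
    mPacket->flags |= AV_PKT_FLAG_KEY;
} else {
    mPacket->data = data + offset;
    mPacket->size = size;
}
// ... set pts/dts and call av_interleaved_write_frame() as before,
// then av_free() the temporary buffer once the packet has been written.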
I have an mp4 video file on my SD card. I would like to extract the audio from the video and then save the extracted audio as a separate file on the SD card using the MediaExtractor API. Here is the code I've tried:
MediaExtractor extractor = new MediaExtractor();
extractor.setDataSource(MEDIA_PATH_To_File_On_SDCARD);
for (int i = 0; i < extractor.getTrackCount(); i++) {
MediaFormat format = extractor.getTrackFormat(i);
String mime = format.getString(MediaFormat.KEY_MIME);
if (mime.startsWith("audio/")) {
extractor.selectTrack(i);
decoder = MediaCodec.createDecoderByType(mime);
if(decoder != null)
{
decoder.configure(format, null, null, 0);
}
break;
}
}
I'm stuck here - I have no idea how to take the selected audio track and save it to the SD card.
Late to the party, but this can be done by using the MediaExtractor and MediaMuxer APIs together. Check out the working code below (the full gist is linked at the end):
/**
* @param srcPath the path of source video file.
* @param dstPath the path of destination video file.
* @param startMs starting time in milliseconds for trimming. Set to
* negative if starting from beginning.
* @param endMs end time for trimming in milliseconds. Set to negative if
* no trimming at the end.
* @param useAudio true if keep the audio track from the source.
* @param useVideo true if keep the video track from the source.
* @throws IOException
*/
@SuppressLint("NewApi")
public void genVideoUsingMuxer(String srcPath, String dstPath, int startMs, int endMs, boolean useAudio, boolean useVideo) throws IOException {
// Set up MediaExtractor to read from the source.
MediaExtractor extractor = new MediaExtractor();
extractor.setDataSource(srcPath);
int trackCount = extractor.getTrackCount();
// Set up MediaMuxer for the destination.
MediaMuxer muxer;
muxer = new MediaMuxer(dstPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
// Set up the tracks and retrieve the max buffer size for selected
// tracks.
HashMap<Integer, Integer> indexMap = new HashMap<Integer, Integer>(trackCount);
int bufferSize = -1;
for (int i = 0; i < trackCount; i++) {
MediaFormat format = extractor.getTrackFormat(i);
String mime = format.getString(MediaFormat.KEY_MIME);
boolean selectCurrentTrack = false;
if (mime.startsWith("audio/") && useAudio) {
selectCurrentTrack = true;
} else if (mime.startsWith("video/") && useVideo) {
selectCurrentTrack = true;
}
if (selectCurrentTrack) {
extractor.selectTrack(i);
int dstIndex = muxer.addTrack(format);
indexMap.put(i, dstIndex);
if (format.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)) {
int newSize = format.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE);
bufferSize = newSize > bufferSize ? newSize : bufferSize;
}
}
}
if (bufferSize < 0) {
bufferSize = DEFAULT_BUFFER_SIZE;
}
// Set up the orientation and starting time for extractor.
MediaMetadataRetriever retrieverSrc = new MediaMetadataRetriever();
retrieverSrc.setDataSource(srcPath);
String degreesString = retrieverSrc.extractMetadata(MediaMetadataRetriever.METADATA_KEY_VIDEO_ROTATION);
if (degreesString != null) {
int degrees = Integer.parseInt(degreesString);
if (degrees >= 0) {
muxer.setOrientationHint(degrees);
}
}
if (startMs > 0) {
extractor.seekTo(startMs * 1000, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
}
// Copy the samples from MediaExtractor to MediaMuxer. We will loop
// for copying each sample and stop when we get to the end of the source
// file or exceed the end time of the trimming.
int offset = 0;
int trackIndex = -1;
ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
muxer.start();
while (true) {
bufferInfo.offset = offset;
bufferInfo.size = extractor.readSampleData(dstBuf, offset);
if (bufferInfo.size < 0) {
Log.d(TAG, "Saw input EOS.");
bufferInfo.size = 0;
break;
} else {
bufferInfo.presentationTimeUs = extractor.getSampleTime();
if (endMs > 0 && bufferInfo.presentationTimeUs > (endMs * 1000)) {
Log.d(TAG, "The current sample is over the trim end time.");
break;
} else {
bufferInfo.flags = extractor.getSampleFlags();
trackIndex = extractor.getSampleTrackIndex();
muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);
extractor.advance();
}
}
}
muxer.stop();
muxer.release();
return;
}
You can invoke the above method with a single line:
genVideoUsingMuxer(videoFile, originalAudio, -1, -1, true, false)
Also, read the comments to use this method more efficiently.
GIST: https://gist.github.com/ArsalRaza/132a6e99d59aa80b9861ae368bc786d0
Take a look at my post Decoding Video and Encoding again by Mediacodec gets a corrupted file, where there is an example (just pay attention to the answer too).
You have to use a MediaMuxer: call addTrack for the video track and write the encoded data for that track to the muxer after encoding each frame. You have to add a track for the audio too. If you want only the audio, ignore the video part and just write the audio data to the muxer. You can see some examples on the grafika page, one of them could be this: https://github.com/google/grafika/
Also you can find more examples here: http://www.bigflake.com/mediacodec/
Thanks
I am trying to make a call recording app for Android. I am using the loudspeaker to record both uplink and downlink audio. The only problem I am facing is that the volume is too low. I've increased the device volume using AudioManager to the maximum and it can't go beyond that.
I first used MediaRecorder, but since it has limited functions and produces compressed audio, I tried AudioRecord instead. I still haven't figured out how to increase the audio level. I've checked projects on GitHub too, but they were of no use. I've searched Stack Overflow for the last two weeks but couldn't find anything at all.
I am quite sure that it's possible, since many other apps are doing it. For instance Automatic Call recorder does that.
I understand that I have to do something with the audio buffer, but I am not quite sure what needs to be done there. Can you guide me on that?
Update:-
I am sorry that I forgot to mention that I am already applying gain. My code is almost identical to RehearsalAssistant (in fact I derived it from there). The gain doesn't work for more than 10 dB, and that doesn't increase the audio volume very much. What I want is to be able to listen to the audio without putting my ear on the speaker, which is what is lacking in my code.
I've asked a similar question about how volume/loudness works at Sound Design SE here. It mentions that gain and loudness are related, but that gain doesn't set the actual loudness level. I am not sure how these things work, but I am determined to get loud volume output.
You obviously have the AudioRecord stuff running, so I skip the decision for sampleRate and inputSource. The main point is that you need to appropriately manipulate each sample of your recorded data in your recording loop to increase the volume. Like so:
int minRecBufBytes = AudioRecord.getMinBufferSize( sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT );
// ...
audioRecord = new AudioRecord( inputSource, sampleRate, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, minRecBufBytes );
// Setup the recording buffer, size, and pointer (in this case quadruple buffering)
int recBufferByteSize = minRecBufBytes*2;
byte[] recBuffer = new byte[recBufferByteSize];
int frameByteSize = minRecBufBytes/2;
int sampleBytes = frameByteSize;
int recBufferBytePtr = 0;
audioRecord.startRecording();
// Do the following in the loop you prefer, e.g.
while ( continueRecording ) {
int reallySampledBytes = audioRecord.read( recBuffer, recBufferBytePtr, sampleBytes );
int i = 0;
while ( i < reallySampledBytes ) {
float sample = (float)( recBuffer[recBufferBytePtr+i ] & 0xFF
| recBuffer[recBufferBytePtr+i+1] << 8 );
// THIS is the point were the work is done:
// Increase level by about 6dB:
sample *= 2;
// Or increase level by 20dB:
// sample *= 10;
// Or if you prefer any dB value, then calculate the gain factor outside the loop
// float gainFactor = (float)Math.pow( 10., dB / 20. ); // dB to gain factor
// sample *= gainFactor;
// Avoid 16-bit-integer overflow when writing back the manipulated data:
if ( sample >= 32767f ) {
recBuffer[recBufferBytePtr+i ] = (byte)0xFF;
recBuffer[recBufferBytePtr+i+1] = 0x7F;
} else if ( sample <= -32768f ) {
recBuffer[recBufferBytePtr+i ] = 0x00;
recBuffer[recBufferBytePtr+i+1] = (byte)0x80;
} else {
int s = (int)( 0.5f + sample ); // Here, dithering would be more appropriate
recBuffer[recBufferBytePtr+i ] = (byte)(s & 0xFF);
recBuffer[recBufferBytePtr+i+1] = (byte)(s >> 8 & 0xFF);
}
i += 2;
}
// Do other stuff like saving the part of buffer to a file
// if ( reallySampledBytes > 0 ) { ... save recBuffer+recBufferBytePtr, length: reallySampledBytes
// Then move the recording pointer to the next position in the recording buffer
recBufferBytePtr += reallySampledBytes;
// Wrap around at the end of the recording buffer, e.g. like so:
if ( recBufferBytePtr >= recBufferByteSize ) {
recBufferBytePtr = 0;
sampleBytes = frameByteSize;
} else {
sampleBytes = recBufferByteSize - recBufferBytePtr;
if ( sampleBytes > frameByteSize )
sampleBytes = frameByteSize;
}
}
Thanks to Hartmut and beworker for the solution. Hartmut's code worked at around 12-14 dB. I also merged in code from the sonic library to increase the volume, but that introduced too much noise and distortion, so I kept the volume at 1.5-2.0 and instead tried to increase the gain. I got a decent sound volume which doesn't sound too loud on the phone, but sounds loud enough when listened to on a PC. It looks like that's as far as I could go.
I am posting my final code for increasing the loudness. Be aware that increasing mVolume introduces too much noise - try increasing the gain instead.
private AudioRecord.OnRecordPositionUpdateListener updateListener = new AudioRecord.OnRecordPositionUpdateListener() {
@Override
public void onPeriodicNotification(AudioRecord recorder) {
aRecorder.read(bBuffer, bBuffer.capacity()); // Fill buffer
if (getState() != State.RECORDING)
return;
try {
if (bSamples == 16) {
shBuffer.rewind();
int bLength = shBuffer.capacity(); // Faster than accessing buffer.capacity each time
for (int i = 0; i < bLength; i++) { // 16bit sample size
short curSample = (short) (shBuffer.get(i) * gain);
if (curSample > cAmplitude) { // Check amplitude
cAmplitude = curSample;
}
if(mVolume != 1.0f) {
// Adjust output volume.
int fixedPointVolume = (int)(mVolume*4096.0f);
int value = (curSample*fixedPointVolume) >> 12;
if(value > 32767) {
value = 32767;
} else if(value < -32767) {
value = -32767;
}
curSample = (short)value;
/*scaleSamples(outputBuffer, originalNumOutputSamples, numOutputSamples - originalNumOutputSamples,
mVolume, nChannels);*/
}
shBuffer.put(curSample);
}
} else { // 8bit sample size
int bLength = bBuffer.capacity(); // Faster than accessing buffer.capacity each time
bBuffer.rewind();
for (int i = 0; i < bLength; i++) {
byte curSample = (byte) (bBuffer.get(i) * gain);
if (curSample > cAmplitude) { // Check amplitude
cAmplitude = curSample;
}
bBuffer.put(curSample);
}
}
bBuffer.rewind();
fChannel.write(bBuffer); // Write buffer to file
payloadSize += bBuffer.capacity();
} catch (IOException e) {
e.printStackTrace();
Log.e(NoobAudioRecorder.class.getName(), "Error occured in updateListener, recording is aborted");
stop();
}
}
@Override
public void onMarkerReached(AudioRecord recorder) {
// NOT USED
}
};
Simply use the MPEG_4 format.
To increase the call recording volume use AudioManager as follows:
int deviceCallVol;
AudioManager audioManager;
Start Recording:
audioManager = (AudioManager)context.getSystemService(Context.AUDIO_SERVICE);
//get the current volume set
deviceCallVol = audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL);
//set volume to maximum
audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, audioManager.getStreamMaxVolume(AudioManager.STREAM_VOICE_CALL), 0);
recorder.setAudioSource(MediaRecorder.AudioSource.VOICE_CALL);
recorder.setOutputFormat(MediaRecorder.OutputFormat.MPEG_4);
recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AAC);
recorder.setAudioEncodingBitRate(32);
recorder.setAudioSamplingRate(44100);
Stop Recording:
//revert volume to initial state
audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, deviceCallVol, 0);
In my app I use the open source sonic library. Its main purpose is to speed up / slow down speech, but besides this it also allows you to increase loudness. I apply it to playback, but it should work similarly for recording - just pass your samples through it before compressing them. It has a Java interface too. Hope this helps.
I'm trying to set up an OpenSL AudioPlayer to use memory I've allocated to play back a wav file. I want to do this so I can have multiple AudioPlayers that share the same data and conserve memory.
I've tried giving OpenSL the entire file and telling it that it is a WAVE with format_mime:
SLDataLocator_Address loc_fd = {SL_DATALOCATOR_ADDRESS, data, size};
SLDataFormat_MIME format_mime = { SL_DATAFORMAT_MIME, (SLchar*)"audio/x-wav",SL_CONTAINERTYPE_WAV};
SLDataSource audioSrc = { &loc_fd, &format_mime };
// configure audio sink
SLDataLocator_OutputMix loc_outmix = { SL_DATALOCATOR_OUTPUTMIX,outputMixObject };
SLDataSink audioSnk = { &loc_outmix, 0 };
// create audio player
const SLInterfaceID ids[2] = { SL_IID_SEEK, SL_IID_PLAYBACKRATE };
const SLboolean req[2] = { SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE };
result = (*engineEngine)->CreateAudioPlayer(engineEngine,&uriPlayerObject[cntSOUND],&audioSrc, &audioSnk, 0, ids, req);
and I have parsed the WAVE data myself and loaded format_pcm
SLDataFormat_PCM format_pcm;
format_pcm.formatType = SL_DATAFORMAT_PCM;
char* wavParser = isWAVE(data);
if(wavParser == NULL)
{
Log("NOT A WAVE!");
return -1;
}
char* fmtChunk = getChunk("fmt ", data, size);
parsefmtChunk(fmtChunk, &format_pcm);
char* dataChunk = getChunk("data",data, size);
dataChunk += 4;
unsigned int dataSize = *((unsigned int*)dataChunk);
dataChunk += 4;
format_pcm.channelMask = 0;
format_pcm.containerSize = 16;
format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
loc_fd.pAddress = dataChunk;
loc_fd.length = dataSize;
The parsefmtChunk function is
void parsefmtChunk(char* fmtchunk, SLDataFormat_PCM* pcm)
{
char* data = fmtchunk + 8;
unsigned short audioFormat = *((unsigned short*)data);
if(audioFormat != 1)
{
Log("Not PCM!");
Log("Reached Line:%d in File %s", __LINE__, __FILE__);
return;
}
data += 2;
pcm->numChannels = *((unsigned short*)data);
data += 2;
pcm->samplesPerSec = *((unsigned int*)data);
data += 4;
//Byte Rate
data += 4;
//Block Align
data += 2;
//BitsPerSample
pcm->bitsPerSample = *((unsigned short*)data);
(Are Byte Rate and Block Align supposed to be used somehow to fill out the pcm struct?)
but whenever I create the audioplayer I get SL_RESULT_CONTENT_UNSUPPORTED
This is what I log from my parsefmtChunk function:
Channels:2
samplesPerSec:44100
bitsPerSample:16
from android-ndk-r8b/docs/opensles/index.html
PCM data format
The PCM data format can be used with buffer queues only.
So SLDataFormat_PCM CANNOT be used with SLDataLocator_Address like I assumed.
I can do what I want with a buffer queue instead, by using just one big enqueue, like so:
bufferqueueitf->Enqueue(bufferqueueitf,dataChunk,dataSize);
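For reference, a rough sketch of that buffer-queue setup, reusing the engineEngine, outputMixObject and the format_pcm parsed above (note that OpenSL expects samplesPerSec in milliHertz and a channel mask that matches the channel count, so those two fields are adjusted here):
// Use an Android simple buffer queue as the data source instead of an
// address locator, since PCM data only works with buffer queues.
SLDataLocator_AndroidSimpleBufferQueue loc_bq =
    { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 1 };              // one buffer is enough here
format_pcm.samplesPerSec *= 1000;                                // parsefmtChunk stored Hz; OpenSL wants milliHertz
format_pcm.channelMask = SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT; // stereo, matching numChannels = 2
SLDataSource audioSrc = { &loc_bq, &format_pcm };

SLDataLocator_OutputMix loc_outmix = { SL_DATALOCATOR_OUTPUTMIX, outputMixObject };
SLDataSink audioSnk = { &loc_outmix, NULL };

// Create and realize the player, asking for the buffer queue interface.
const SLInterfaceID ids[1] = { SL_IID_BUFFERQUEUE };
const SLboolean req[1] = { SL_BOOLEAN_TRUE };
SLObjectItf playerObject;
(*engineEngine)->CreateAudioPlayer(engineEngine, &playerObject, &audioSrc, &audioSnk, 1, ids, req);
(*playerObject)->Realize(playerObject, SL_BOOLEAN_FALSE);

SLAndroidSimpleBufferQueueItf bufferqueueitf;
(*playerObject)->GetInterface(playerObject, SL_IID_BUFFERQUEUE, &bufferqueueitf);
SLPlayItf playItf;
(*playerObject)->GetInterface(playerObject, SL_IID_PLAY, &playItf);

// Hand the whole parsed data chunk to the queue and start playback.
(*bufferqueueitf)->Enqueue(bufferqueueitf, dataChunk, dataSize);
(*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);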
Have you tried this?
SLDataFormat_MIME format_mime = {SL_DATAFORMAT_MIME, NULL, SL_CONTAINERTYPE_UNSPECIFIED};
The Android implementation of OpenSL ES isn't totally compliant and http://mobilepearls.com/labs/native-android-api/ndk/docs/opensles/ recommends the following:
The Android implementation of OpenSL ES requires that mimeType be initialized to either NULL or a valid UTF-8 string, and that containerType be initialized to a valid value. In the absence of other considerations, such as portability to other implementations, or content format which cannot be identified by header, we recommend that you set the mimeType to NULL and containerType to SL_CONTAINERTYPE_UNSPECIFIED.
Also, make sure you're giving it a valid URI.
I want to send an audio stream from a PC (a C++ application, using the FMOD API to decode audio data and send it via a UDP socket) to an Android device. The communication already works and I can hear "sound" (100 ms of sound followed by 900 ms of silence, alternating) on the Android device.
I don't know why the sound is stuttering - on the PC the same audio stream plays fine in nice quality. I think the problem is on the Android side.
Here is the code:
DatagramSocket sock = new DatagramSocket(12345);
byte []bSockBuffer = new byte[1024];
byte []bRecvBufTmp;
int iAudioBufSize, iCurAudioBufPos = 0;
sock.setReceiveBufferSize(bSockBuffer.length);
// Initialise the audio stream:
iAudioBufSize = AudioTrack.getMinBufferSize(44100, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100, AudioFormat.CHANNEL_OUT_STEREO,
AudioFormat.ENCODING_PCM_16BIT, iAudioBufSize, AudioTrack.MODE_STREAM);
track.play();
while (true)
{
DatagramPacket pack = new DatagramPacket(bSockBuffer, bSockBuffer.length);
// Receive the packet:
sock.receive(pack);
track.write(pack.getData(), 0, pack.getLength());
}
I'm sure the 'AudioTrack' object is set up correctly; the settings match those in my C++ application.
Another step I tried was pre-buffering the received socket data in a temporary 'byte[]' variable and writing it to the AudioTrack object once the buffer size 'iAudioBufSize' was reached.
This did not help.
Any ideas?
Thanks
[EDIT]
Code of the C++ application, based on the "manualdecode" sample of the FMOD API examples:
FMOD_RESULT F_CALLBACK pcmreadcallback(FMOD_SOUND *sound, void *data, unsigned int datalen)
{
CCtrlSocket *cClientTmp = /* Obtaining target client sock here */;
FMOD_RESULT result;
unsigned int read, uSentTmp, uSizeTmp;
EnterCriticalSection(&decodecrit);
if (!decodesound)
return (FMOD_ERR_FILE_EOF);
result = decodesound->readData(data, datalen, &read);
if (result == FMOD_ERR_FILE_EOF)
{
// Handle looping:
decodesound->seekData(0);
datalen -= read;
result = decodesound->readData((char*) data + read, datalen, &read);
}
// Split package in multiple parts:
uSentTmp = 0;
do
{
uSizeTmp = (read - uSentTmp);
if (uSizeTmp > 1024)
uSizeTmp = 1024;
uSentTmp += cClientTmp->SendAudioData((char*) data + uSentTmp, uSizeTmp);
} while (uSentTmp < read);
LeaveCriticalSection(&decodecrit);
return (FMOD_OK);
}
I've solved this problem.
The culprit was a write to a log file that cost a lot of time and created a lag :(
Now I can hear the streamed music on my Android client, but there are still some lags. I've experimented with a LOT of values for the socket and AudioTrack buffers.
I have compared the amount of sent and received bytes: in 20 secs, sending 9170000 bytes of data results in 8120000 bytes being received on the Android device. At first the stream plays fast for 3 secs (meaning the buffer is full?). After 30 secs the stream lags (meaning the buffer is empty?).
In general the music quality is very good, but there is a sizzling noise all the time (which indicates lost socket packets?).
My 'PlaybackStart()' function has changed - I'm not using a PCM read callback anymore:
FMOD_RESULT CAudioStream::PlaybackStart()
{
CCtrlSocket *cClientTmp;
unsigned int read, uSentTmp, uSizeTmp;
FMOD_RESULT result;
result = system->createStream("C:\\test.mp3", FMOD_OPENONLY | FMOD_ACCURATETIME, 0, &sound);
if(result != FMOD_OK)
return (result);
int iChannels, iBits;
FMOD_SOUND_FORMAT fFormat;
FMOD_SOUND_TYPE fType;
result = sound->getFormat(&fType, &fFormat, &iChannels, &iBits);
if(result != FMOD_OK)
return (result);
void *data;
unsigned int length = 0;
int iSampleSec = 1; // Playtime
int iSampleSize = (44100 * 2 * sizeof(signed short) * iSampleSec);
int iSleep = 6; // Sleep after sending a package
DWORD dSleepTotal;
result = sound->getLength(&length, FMOD_TIMEUNIT_PCMBYTES);
if(result != FMOD_OK)
return (result);
data = malloc(iSampleSize);
if (!data)
return (FMOD_RESULT_FORCEINT);
cClientTmp = (CCtrlSocket*) CCtrlSocket::cServerSock.GetClientSock(CCtrlSocket::cServerSock.GetClientSockCount() - 1);
do
{
result = sound->readData((char*) data, iSampleSize, &read);
if ((result != FMOD_OK) && (result != FMOD_ERR_FILE_EOF))
ASSERT(FALSE);
else if (read > 0)
{
dSleepTotal = 0;
for (int i = 0; i < read; i += NET_SVR_AUDIO_BUFFER)
{
// MIN_VAL_LIMITED ((MIN_VAL(VAL1, VAL2) <= LIMIT) ? LIMIT : MIN_VAL(VAL1, VAL2))
cClientTmp->SendAudioData((char*) data + i, MIN_VAL_LIMITED(NET_SVR_AUDIO_BUFFER, (read - i), 0));
// Sleep after sending every package:
Sleep(iSleep);
dSleepTotal += iSleep;
}
if (dSleepTotal < (iSampleSec * 1000))
{
dSleepTotal = (iSampleSec * 1000) - dSleepTotal;
// Sleep after sending every second playtime:
Sleep(dSleepTotal);
}
}
} while (read > 0);
result = sound->release();
if(result != FMOD_OK)
return (result);
result = system->close();
if(result != FMOD_OK)
return (result);
result = system->release();
if(result != FMOD_OK)
return (result);
return (result);
}
I have experimented with different sleep-timings, too.