Audio playing back at the wrong speed using FFmpeg on Android

The general problem is the following:
I decode the audio as follows:
ReSampleContext* rsc = av_audio_resample_init(
        1, aCodecCtx->channels,
        aCodecCtx->sample_rate, aCodecCtx->sample_rate,
        av_get_sample_fmt("u8"), aCodecCtx->sample_fmt,
        16, 10, 0, 1);
while (av_read_frame(pFormatCtx, &packet) >= 0) {
    if (aCodecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
        int data_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 2;
        int size = packet.size;
        int decoded = 0;
        while (size > 0) {
            int len = avcodec_decode_audio3(aCodecCtx, pAudioBuffer,
                    &data_size, &packet);
            // Convert audio to 8-bit samples
            out_size = audio_resample(rsc, outBuffer, pAudioBuffer, len);
            jbyte *bytes = (*env)->GetByteArrayElements(env, array, NULL);
            memcpy(bytes, outBuffer, out_size);
            (*env)->ReleaseByteArrayElements(env, array, bytes, 0);
            (*env)->CallStaticVoidMethod(env, cls, mid, array, out_size, number);
            size -= len;
            number++;
        }
    }
}
Then I play the result with an AudioTrack. I do hear the song I expected, but with noise and at twice the speed. What could the problem be?
UPDATE:
This is the Java code:
public static AudioTrack track;
public static byte[] bytes;

protected void onCreate(Bundle savedInstanceState) {
    super.onCreate(savedInstanceState);
    setContentView(R.layout.main);
    int bufSize = 2048;
    track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_8BIT, bufSize, AudioTrack.MODE_STREAM);
    bytes = new byte[bufSize];
    Thread mAudioThread = new Thread(new Runnable() {
        public void run() {
            int res = main(2, "/sdcard/muzika_iz_reklami_bmw_5_series_-_bmw_5_series.mp3", bytes);
            System.out.println(res);
        }
    });
    mAudioThread.setPriority(Thread.MAX_PRIORITY);
    mAudioThread.start();
}

private static void play(byte[] play, int length, int p) {
    if (p == 0) {
        track.play();
    }
    track.write(play, 0, length);
}

Perhaps your AudioTrack is expecting stereo data but you are sending it mono. You could try setting your AudioTrack channel configuration to CHANNEL_OUT_MONO.

This could be a sample rate mismatch: maybe you are creating the AudioTrack with a lower sample rate than the actual rate of the decoded audio.
It could also be a problem with the AudioTrack channel configuration, as Matthew mentioned.
Maybe you should try something like what I answered in this question.
Given the way you are interacting with JNI from Java, I am not sure whether that will work; I have little knowledge of JNI. But my code works for me, and it is what I am currently using in my app.
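A minimal sketch of that idea, assuming the native side can report the decoder's actual rate (getSampleRate() below is a hypothetical JNI method returning aCodecCtx->sample_rate): construct the AudioTrack with the same rate, channel layout, and encoding that the resampler produces (mono, 8-bit PCM), instead of hard-coding 44100.
int sampleRate = getSampleRate();  // hypothetical native method: aCodecCtx->sample_rate
int minSize = AudioTrack.getMinBufferSize(sampleRate,
        AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_8BIT);
track = new AudioTrack(AudioManager.STREAM_MUSIC,
        sampleRate,                     // same rate the resampler outputs
        AudioFormat.CHANNEL_OUT_MONO,   // the resampler downmixes to 1 channel
        AudioFormat.ENCODING_PCM_8BIT,  // the resampler converts to u8 samples
        Math.max(minSize, 2048),
        AudioTrack.MODE_STREAM);
If the MP3 is actually 22050 Hz, for example, playing it through a 44100 Hz AudioTrack would explain the doubled speed.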

Related

Android: confused about how to get amplitude of a frequency generated and played by AudioTrack

In my app I generate a list of sounds with different frequencies, for example 1000 Hz, 2000 Hz, 4000 Hz ... for the left and right channels:
private AudioTrack generateTone(Ear ear) {
    // fill out the array
    int numSamples = SAMPLE_RATE * getDurationInSeconds();
    double[] sample = new double[numSamples];
    byte[] generatedSnd = new byte[2 * numSamples];
    for (int i = 0; i < numSamples; ++i) {
        sample[i] = Math.sin(2 * Math.PI * i / (SAMPLE_RATE / getLatestFreqInHz()));
    }
    // convert to 16 bit pcm sound array
    // assumes the sample buffer is normalised.
    int idx = 0;
    for (final double dVal : sample) {
        // scale to maximum amplitude
        final short val = (short) ((dVal * 32767));
        // in 16 bit wav PCM, first byte is the low order byte
        generatedSnd[idx++] = (byte) (val & 0x00ff);
        generatedSnd[idx++] = (byte) ((val & 0xff00) >>> 8);
    }
    AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            SAMPLE_RATE, AudioFormat.CHANNEL_OUT_MONO,
            AudioFormat.ENCODING_PCM_16BIT, numSamples,
            AudioTrack.MODE_STATIC);
    audioTrack.write(generatedSnd, 0, generatedSnd.length);
    int channel;
    if (ear == Ear.LEFT) {
        audioTrack.setStereoVolume(1.0f, 0.0f);
    } else if (ear == Ear.RIGHT) {
        audioTrack.setStereoVolume(0.0f, 1.0f);
    }
    return audioTrack;
}
I use this code to control volume:
audioManager = (AudioManager) context.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
MAX_VOLUME = audioManager.getStreamMaxVolume(STREAM_MUSIC);
MIN_VOLUME = audioManager.getStreamMinVolume(STREAM_MUSIC);
APP_DEFAULT_VOLUME = (MAX_VOLUME + MIN_VOLUME) / 2;

@Override
public void setToAppDefaultVolume() {
    Timber.tag(TAG).d("Resetting to app default sound volume.");
    audioManager.setStreamVolume(STREAM_MUSIC, APP_DEFAULT_VOLUME, 0);
}

@Override
public void increaseVolume() {
    if (!isReachedMaxVolume()) {
        Timber.tag(TAG).d("Increasing device sound volume from %d to %d", currentVolumeLevel(), currentVolumeLevel() + STEP);
        audioManager.setStreamVolume(STREAM_MUSIC, currentVolumeLevel() + STEP, 0);
    } else {
        Timber.tag(TAG).d("Reached the maximum device volume.");
    }
}

@Override
public void decreaseVolume() {
    if (!isReachedMinVolume()) {
        Timber.tag(TAG).d("Decreasing device sound volume from %d to %d", currentVolumeLevel(), currentVolumeLevel() - STEP);
        audioManager.setStreamVolume(STREAM_MUSIC, currentVolumeLevel() - STEP, 0);
    } else {
        Timber.tag(TAG).d("Reached the preferred minimum volume");
    }
}
I start playing each tone (frequency) at APP_DEFAULT_VOLUME and gradually increase the volume. When the user confirms that he/she heard a specific tone at a specific volume, I want to calculate its amplitude and log it for later so I can review the user's hearing ...
But I have no clue how to do so!
All the solutions I found were about reading data from the microphone and calculating the amplitude to visualize it on screen ...
My scenario is much simpler: I have the device volume recorded, the frequency is fixed and recorded, and the audio is generated dynamically and is not a file.
Will anyone help me with this scenario?
Thanks in advance.

Audio routing between Android and PC produces white noise

I am trying to send audio between Windows and Android. I was able to do this successfully from Windows to Windows, but when I stream audio from Android it produces only white noise. I think it is an issue with the AudioFormat on the Android and Windows sides, because when I changed the sample size to 8 bits I heard the voice in one side of my headphones, but then it went away too.
On Android Side
int BUFFER_MS = 15; // do not buffer more than BUFFER_MS milliseconds
int bufferSize = 48000 * 2 * BUFFER_MS / 1000;
AudioTrack audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC, 48000, 2,
        AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);
byte[] buffer = new byte[bufferSize];
int bytesRead;
audioTrack.play();
while (socket.isConnected()) {
    bytesRead = inputStream.read(buffer, 0, buffer.length);
    audioTrack.write(buffer, 0, bytesRead);
}
On Windows Side
AudioFormat format = getAudioFormat();
DataLine.Info info = new DataLine.Info(TargetDataLine.class, format);
// checks if system supports the data line
if (!AudioSystem.isLineSupported(info)) {
    throw new LineUnavailableException(
            "The system does not support the specified format.");
}
TargetDataLine audioLine = AudioSystem.getTargetDataLine(format);
audioLine.open(format);
audioLine.start();
byte[] buffer = new byte[BUFFER_SIZE];
int bytesRead;
while (socket.isConnected()) {
    bytesRead = audioLine.read(buffer, 0, buffer.length);
    outputStream.write(buffer, 0, bytesRead);
}
and the getAudioFormat function is:
AudioFormat getAudioFormat() {
    float sampleRate = 48000;
    int sampleSizeInBits = 16;
    int channels = 2;
    boolean signed = true;
    boolean bigEndian = true;
    return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed,
            bigEndian);
}
I am only hearing white noise. If someone can help, please do.
Okayyyy, so I found out the problem. I just had to set bigEndian to false -_-
It's the byte order difference. I don't understand why it's different between Android and the PC, but it seems to do the trick.
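For reference, a minimal sketch of the capture-side fix, keeping the rest of the pipeline unchanged: AudioTrack's ENCODING_PCM_16BIT expects little-endian samples on typical Android devices, so the javax.sound.sampled format on the PC side should be created with bigEndian set to false.
AudioFormat getAudioFormat() {
    float sampleRate = 48000;
    int sampleSizeInBits = 16;
    int channels = 2;
    boolean signed = true;
    boolean bigEndian = false;  // was true; big-endian bytes play back as noise on the device
    return new AudioFormat(sampleRate, sampleSizeInBits, channels, signed, bigEndian);
}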

MediaCodec decode AAC audio chunks from RTSP and play

I'm receiving RTP packets containing AAC audio chunks encoded by libvo_aacenc (44100 Hz, 128 kbps, 2 ch) from an FFServer instance. I'm trying to decode them one by one with MediaCodec on Android and play each chunk back as soon as it is decoded.
Client.java
Player player = new Player();

//RTSP listener
@Override
public void onRTSPPacketReceived(RTPpacket packet) {
    byte[] aac_chunk = packet.getpayload();
    player.playAAC(aac_chunk);
}
Player.java
private MediaCodec decoder;
private AudioTrack audioTrack;
private MediaExtractor extractor;

public Player() {
    extractor = new MediaExtractor();
    audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
            44100, AudioFormat.CHANNEL_OUT_STEREO,
            AudioFormat.ENCODING_PCM_16BIT,
            44100,
            AudioTrack.MODE_STREAM);
    MediaFormat format = new MediaFormat();
    format.setString(MediaFormat.KEY_MIME, "audio/mp4a-latm");
    format.setInteger(MediaFormat.KEY_BIT_RATE, 128 * 1024);
    format.setInteger(MediaFormat.KEY_CHANNEL_COUNT, 2);
    format.setInteger(MediaFormat.KEY_SAMPLE_RATE, 44100);
    format.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectHE);
    try {
        decoder = MediaCodec.createDecoderByType("audio/mp4a-latm");
        decoder.configure(format, null, null, 0);
    } catch (IOException e) {
        e.printStackTrace();
    }
    decoder.start();
    audioTrack.play();
}

//Decode and play one aac_chunk
public void playAAC(byte[] data) {
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    ByteBuffer[] inputBuffers = decoder.getInputBuffers();
    ByteBuffer[] outputBuffers = decoder.getOutputBuffers();
    int inIndex = decoder.dequeueInputBuffer(-1);
    if (inIndex >= 0) {
        ByteBuffer buffer = inputBuffers[inIndex];
        buffer.put(data, 0, data.length);
        int sampleSize = extractor.readSampleData(buffer, 0);
        if (sampleSize < 0) {
            decoder.queueInputBuffer(inIndex, 0, 0, 0, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
        } else {
            long presentationTimeUs = extractor.getSampleTime();
            decoder.queueInputBuffer(inIndex, 0, sampleSize, presentationTimeUs, 0);
        }
    }
    int outIndex = decoder.dequeueOutputBuffer(info, TIMEOUT);
    while (outIndex >= 0) {
        ByteBuffer outBuffer = outputBuffers[outIndex];
        byte[] decoded_chunk = new byte[info.size];
        outBuffer.get(decoded_chunk); // Read the buffer all at once
        outBuffer.clear();
        //!! Decoded decoded_chunk.length = 0 !!
        System.out.println("DECODED CHUNK SIZE: " + decoded_chunk.length);
        //Instant play of the decoded chunk
        audioTrack.write(decoded_chunk, info.offset, info.offset + info.size);
        decoder.releaseOutputBuffer(outIndex, false);
        break;
    }
    decoder.flush();
}
On start, MediaCodec is correctly initiated.
MediaCodec: (0xa5040280) start
MediaCodec: (0xa5040280) input buffers allocated
MediaCodec: (0xa5040280) numBuffers (4)
MediaCodec: (0xa5040280) output buffers allocated
MediaCodec: (0xa5040280) numBuffers (4)
The problem
I'm actually hearing no sound. MediaCodec is running, but it looks like it's not decoding anything into its output buffers, since decoded_chunk.length = 0 and outBuffer.limit() = 0.
Questions
Should I fill the MediaCodec input buffers asynchronously? Unfortunately, none of the examples I found address this problem: immediate decode and playback.
I've followed these examples:
Decode and playback AAC file extracting media information. (link)
Same but different way to implement MediaCodec, steps defined (link)
I've solved this by using MediaCodec in async mode with MediaCodec.Callback, as described in the official docs here, which is available only from Android minSdkVersion 21.
Basically, I put every RTP audio chunk I receive into a queue, and then I'm notified every time the state of the MediaCodec buffers changes. This actually makes the decoder flow easier to handle.
decoder.setCallback(new MediaCodec.Callback() {
    @Override
    public void onInputBufferAvailable(@NonNull MediaCodec mediaCodec, int i) {
        //One InputBuffer is available to decode
        while (true) {
            if (queue.size() > 0) {
                byte[] data = queue.removeFirst();
                MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
                ByteBuffer buffer = mediaCodec.getInputBuffer(i);
                buffer.put(data, 0, data.length);
                mediaCodec.queueInputBuffer(i, 0, data.length, 0, 0);
                break;
            }
        }
    }

    @Override
    public void onOutputBufferAvailable(@NonNull MediaCodec mediaCodec, int i, @NonNull MediaCodec.BufferInfo info) {
        //DECODING PACKET ENDED
        ByteBuffer outBuffer = mediaCodec.getOutputBuffer(i);
        byte[] chunk = new byte[info.size];
        outBuffer.get(chunk); // Read the buffer all at once
        outBuffer.clear();
        audioTrack.write(chunk, info.offset, info.offset + info.size); // AudioTrack write data
        mediaCodec.releaseOutputBuffer(i, false);
    }

    @Override
    public void onError(@NonNull MediaCodec mediaCodec, @NonNull MediaCodec.CodecException e) {}

    @Override
    public void onOutputFormatChanged(@NonNull MediaCodec mediaCodec, @NonNull MediaFormat mediaFormat) {}
});

Android sound wave - not smooth and duration issue

I have a problem generating a smooth sine wave.
I did it a few years ago in C++ and everything worked perfectly. Now I am trying to do it using AudioTrack and I do not know what is wrong.
This is my test case:
I want to produce a smooth sine wave (no cracks etc.) for five seconds. For one second I generate 44100 samples and divide them into several buffers of size 8192 (this is probably the reason for the cracks, but how can I fix it without using a bigger buffer size?).
Unfortunately, with my code the sound is not smooth, and instead of 5 seconds it lasts about 1 second. I would be grateful for any help.
Please let me know if this piece of code is not enough.
class Constants:
//<---
public final static int SAMPLING = 44100;
public final static int DEFAULT_GEN_DURATION = 1000;
public final static int DEFAULT_NUM_SAMPLES = DEFAULT_GEN_DURATION * SAMPLING / 1000; //44100 per second
public final static int DEFAULT_BUFFER_SIZE = 8192;
//--->

//preparing buffers to play;
Buffer buffer = new Buffer();
short[] buffer_values = new short[Constants.DEFAULT_BUFFER_SIZE];
float[] samples = new float[Constants.DEFAULT_BUFFER_SIZE];
float d = (float) ((Constants.FREQUENCIES[index] * 2 * Math.PI) / Constants.SAMPLING);
int numSamples = Constants.DEFAULT_NUM_SAMPLES; //44100 per second - for test
float x = 0;
int index_in_buffer = 0;
for (int i = 0; i < numSamples; i++) {
    if (index_in_buffer >= Constants.DEFAULT_BUFFER_SIZE - 1) {
        buffer.setBufferShort(buffer_values);
        buffer.setBufferSizeShort(index_in_buffer);
        queue_with_data_AL.add(buffer); //add buffer to queue
        buffer_values = new short[Constants.DEFAULT_BUFFER_SIZE];
        samples = new float[Constants.DEFAULT_BUFFER_SIZE];
        index_in_buffer = 0;
    }
    samples[index_in_buffer] = (float) Math.sin(x);
    buffer_values[index_in_buffer] = (short) (samples[index_in_buffer] * Short.MAX_VALUE);
    x += d;
    index_in_buffer++;
}
buffer.setBufferShort(buffer_values);
buffer.setBufferSizeShort(index_in_buffer + 1);
queue_with_data_AL.add(buffer);
index_in_buffer = 0;
}
//class AudioPlayer
public AudioPlayer(int sampleRate) { //44100
    int minSize = AudioTrack.getMinBufferSize(sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT);
    audiotrack = new AudioTrack(AudioManager.STREAM_MUSIC, sampleRate,
            AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT,
            minSize, AudioTrack.MODE_STREAM);
}

public void play(byte[] audioData, int sizeOfBuffer) {
    audiotrack.write(audioData, 0, sizeOfBuffer);
}

public void start() {
    if (state == Constants.STOP_STATE) {
        state = Constants.START_STATE;
        int startLength = 0;
        while (state == Constants.START_STATE) {
            Buffer buffer = getBufferFromQueueAL(); //getting buffer from prepared list
            if (buffer != null) {
                short[] data = buffer.getBufferShort();
                int size_of_data = buffer.getBufferSizeShort();
                if (data != null) {
                    int len = audiotrack.write(data, 0, size_of_data);
                    if (startLength == 0) {
                        audiotrack.play();
                    }
                    startLength += len;
                } else {
                    break;
                }
            } else {
                MessagesLog.e(TAG, "get null data");
                break;
            }
        }
        if (audiotrack != null) {
            audiotrack.pause();
            audiotrack.flush();
            audiotrack.stop();
        }
    }
}
You are playing only 1 second because 44100 samples at a sample rate of 44100 Hz are exactly 1 second of sound.
You have to generate 5 times as many samples if you want to play 5 seconds of sound (e.g. multiply DEFAULT_NUM_SAMPLES by 5) in your code.
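A minimal sketch of that change against the existing Constants class (DURATION_IN_SECONDS is a hypothetical name for the new constant):
public final static int DURATION_IN_SECONDS = 5;  // hypothetical constant for the desired length
public final static int DEFAULT_NUM_SAMPLES =
        DURATION_IN_SECONDS * SAMPLING;            // 5 * 44100 = 220500 samples instead of 44100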
I've found the solution myself. After adding a Buffer to queue_with_data_AL I had forgotten to create a new instance of the Buffer object, so the queue contained several buffers referring to the same instance, and hence the sine wave was not continuous.
Thanks to anyone who tried to solve my problem. Unfortunately it was just my programming mistake.
Best regards.
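A minimal sketch of that fix inside the generation loop, assuming Buffer is the asker's own wrapper class: allocate a fresh Buffer (not just fresh arrays) after enqueueing the filled one, so every queued entry keeps its own data.
if (index_in_buffer >= Constants.DEFAULT_BUFFER_SIZE - 1) {
    buffer.setBufferShort(buffer_values);
    buffer.setBufferSizeShort(index_in_buffer);
    queue_with_data_AL.add(buffer);                           // enqueue the filled buffer
    buffer = new Buffer();                                    // the missing step: new instance per chunk
    buffer_values = new short[Constants.DEFAULT_BUFFER_SIZE];
    samples = new float[Constants.DEFAULT_BUFFER_SIZE];
    index_in_buffer = 0;
}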

AudioTrack: start called from a thread

I am using AudioTrack in static mode to play the same signal over and over again.
I have followed the example here, and sometimes it works perfectly, but sometimes it throws this error and produces no sound:
AudioTrack: start called from a thread
01-23 15:26:16.902: W/libutils.threads(1133): Thread (this=0x3973b8): don't call waitForExit() from this Thread object's thread. It's a guaranteed deadlock!
This is the source code. I'm trying to ensure that I call stop and reload the data for the next "play" execution.
public class SoundPlayer {
    // originally from http://marblemice.blogspot.com/2010/04/generate-and-play-tone-in-android.html
    private int numSamples;
    private double sample[];
    private byte generatedSnd[];
    private AudioTrack audioTrack;

    public SoundPlayer(float duration, int sampleRate, double freqOfTone) {
        super();
        this.numSamples = (int) (duration * sampleRate);
        this.sample = new double[numSamples];
        this.generatedSnd = new byte[2 * numSamples];
        // fill out the array
        for (int i = 0; i < numSamples; ++i) {
            sample[i] = Math.sin(2 * Math.PI * i / (sampleRate / freqOfTone));
        }
        // convert to 16 bit pcm sound array
        // assumes the sample buffer is normalised.
        int idx = 0;
        for (final double dVal : sample) {
            // scale to maximum amplitude
            final short val = (short) ((dVal * 32767));
            // in 16 bit wav PCM, first byte is the low order byte
            generatedSnd[idx++] = (byte) (val & 0x00ff);
            generatedSnd[idx++] = (byte) ((val & 0xff00) >>> 8);
        }
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                sampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
                AudioFormat.ENCODING_PCM_16BIT, numSamples,
                AudioTrack.MODE_STATIC);
        audioTrack.write(generatedSnd, 0, generatedSnd.length);
    }

    public void playSound() {
        if (audioTrack.getPlayState() == (AudioTrack.PLAYSTATE_PLAYING | AudioTrack.PLAYSTATE_PAUSED)) {
            audioTrack.stop();
            audioTrack.reloadStaticData();
        }
        Log.i("Audio", "playState: " + audioTrack.getPlayState());
        audioTrack.play();
        audioTrack.stop();
        audioTrack.reloadStaticData();
    }
}
If we look at the Android source code, it does not explain much:
void AudioTrack::start()
{
    sp<AudioTrackThread> t = mAudioTrackThread;
    LOGV("start");
    if (t != 0) {
        if (t->exitPending()) {
            if (t->requestExitAndWait() == WOULD_BLOCK) {
                LOGE("AudioTrack::start called from thread");
                return;
            }
        }
        t->mLock.lock();
    }
Does anyone know how to handle this?
I had a similar problem once.
Simply put, if you have work running in more than one thread, you have to make sure that creating the AudioTrack and calling play() and stop() on it all happen in the same thread.
However, that doesn't mean you have to generate the audio samples in that thread, too. If you use static audio data (AudioTrack.MODE_STATIC), you can preload or generate it elsewhere and then reuse it multiple times throughout the lifetime of your app.
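A minimal sketch of that idea using the SoundPlayer class from the question: funnel creation and playback through one dedicated HandlerThread so the AudioTrack is always touched from the same thread (the soundPlayer field and the constructor parameters below are illustrative).
final HandlerThread audioThread = new HandlerThread("audio");
audioThread.start();
final Handler audioHandler = new Handler(audioThread.getLooper());

// Create the player (and its AudioTrack) on that thread...
audioHandler.post(new Runnable() {
    @Override
    public void run() {
        soundPlayer = new SoundPlayer(0.5f, 44100, 440.0); // illustrative parameters
    }
});

// ...and always trigger playback from the same thread.
audioHandler.post(new Runnable() {
    @Override
    public void run() {
        soundPlayer.playSound();
    }
});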
