I'm trying to make an app that transmits the SuperpoweredAndroidAudioIO buffer from one Android device to another. With the following code I've managed to send and receive a short int buffer from the audio callback. However, on the receiving side the sound gets very distorted during playback.
Note: for brevity I have not included some code that doesn't seem related to the issue, including the socket initialisation functions. If needed I can add it.
Sending side:
Mic.cpp
static bool SendBuffer(
        int sd,
        sockaddr_in address,
        short int *buffer,
        size_t bufferSize) {
    // Send the data buffer to the socket
    ssize_t sentSize = sendto(sd,
                              buffer,
                              bufferSize,
                              0,
                              (struct sockaddr*)&address,
                              sizeof address);
    // If the send failed
    if (sentSize == -1) {
        __android_log_print(ANDROID_LOG_INFO, "Sent size", "%zi error %i",
                            sentSize, errno);
    } else if (sentSize > 0) {
        __android_log_print(ANDROID_LOG_INFO, "DatagramSent", "first sample: %hi size: %zi",
                            buffer[0], sentSize);
    }
    return true;
}
// audio callback
static bool micProcessing(
        void *clientdata,
        short int *audioInputOutput,
        int numberOfSamples,
        int __unused samplerate) {
    return SendBuffer(micSocket, dsocket, audioInputOutput, numberOfSamples);
}
// Constructor
Mic::Mic(JNIEnv *env,
         unsigned int samplerate,
         unsigned int buffersize,
         unsigned int port) {
    micSocket = NewUdpSocket(env);
    dsocket = initDestinationSocket(port); // where to send
    __android_log_write(ANDROID_LOG_DEBUG, "Sockets", "initialised");
    // init IO
    microphone = new SuperpoweredAndroidAudioIO(samplerate,
                                                buffersize,
                                                true,
                                                false,
                                                micProcessing,
                                                this,
                                                -1,
                                                SL_ANDROID_STREAM_MEDIA,
                                                buffersize * 2);
    __android_log_write(ANDROID_LOG_DEBUG, "Mic", "initialised");
}
The receiving side consists of two parts: Mixer and Channel.
Mixer.cpp
// audio callback
static bool mainprocess(
        void *clientdata,
        short int *audioInputOutput,
        int numberOfSamples,
        int __unused samplerate) {
    return ((Mixer*)clientdata)->processMain(audioInputOutput, numberOfSamples);
}
// Setting up the Mixer
Mixer::Mixer(JNIEnv *env, unsigned int samplerate, unsigned int buffersize) {
    // Short int buffers for receiving
    channel1 = new Channel(samplerate, buffersize);
    // output buffer
    outputFloat = ((float *)memalign(16, (buffersize + 16) * sizeof(float) * 2));
    // volumes
    outputLevel = 0.5f;
    channel1level = 0.2f;
    channel2level = 0.2f;
    channel3level = 0.2f;
    channel4level = 0.2f;
    mainmixer = new SuperpoweredMonoMixer();
    __android_log_print(ANDROID_LOG_INFO, "Mixer", " Created");
    main = new SuperpoweredAndroidAudioIO(
            samplerate,
            buffersize,
            false,
            true,
            mainprocess,
            this,
            -1,
            SL_ANDROID_STREAM_MEDIA,
            buffersize * 2);
    __android_log_write(ANDROID_LOG_INFO, "Main AudioIO created", " ");
    main->stop();
    SuperpoweredCPU::setSustainedPerformanceMode(true); // Prevents CPU drops
}
// processing.
bool Mixer::processMain(short int *outputbuffer, unsigned int numberOfSamples) {
    // a bit awkward
    channel1->returnFloatBuffer();
    inputs[0] = channel1->floatBuffer;
    inputs[1] = NULL;
    inputs[2] = NULL;
    inputs[3] = NULL;
    __android_log_print(ANDROID_LOG_INFO, "Channels are inside", " of mixer");
    inputLevels[0] = channel1level;
    inputLevels[1] = channel2level;
    inputLevels[2] = channel3level;
    inputLevels[3] = channel4level;
    mainmixer->process(inputs,
                       outputFloat,
                       inputLevels,
                       outputLevel,
                       numberOfSamples);
    SuperpoweredFloatToShortInt(outputFloat, outputbuffer, numberOfSamples);
    return true;
}
Channel.cpp
// receiving buffer
static bool ReceiveDatagramFromSocket(int sd, short int *buffer, size_t bufferSize) {
    ssize_t recvSize = recvfrom(sd, buffer, bufferSize, 0, NULL, NULL);
    if (-1 == recvSize) { // If failed
        __android_log_print(ANDROID_LOG_INFO, "receive failed", " %i ", errno);
    } else if (recvSize > 0) {
        // If data is received
        __android_log_print(ANDROID_LOG_INFO, "Received", " %zi bytes, first sample: %hi",
                            recvSize, buffer[0]);
    }
    return true;
}
// init channel
Channel::Channel(unsigned int samplerate, unsigned int buffersize) {
    socketIn = NewUdpSocket();
    BindSocketToPort(socketIn);
    samplerRate = samplerate;
    bufferSize = buffersize;
    shortIntBuffer = (short int *)malloc((buffersize + 16) * sizeof(short int) * 4);
    floatBuffer = (float *)memalign(16, (buffersize + 16) * sizeof(float) * 2);
    bandEQ = new Superpowered3BandEQ(samplerate);
    bandEQ->enable(true);
    __android_log_print(ANDROID_LOG_INFO, "Channel ", "created");
}
// this function is called from Mixer.cpp
void Channel::returnFloatBuffer() {
    ReceiveDatagramFromSocket(socketIn, shortIntBuffer, bufferSize);
    // Converting the 16-bit integer samples to 32-bit floating point.
    SuperpoweredShortIntToFloat(shortIntBuffer, floatBuffer, bufferSize, 1);
    bandEQ->process(floatBuffer, floatBuffer, bufferSize);
    __android_log_print(ANDROID_LOG_INFO, "EQ", " processing");
}
At first I thought the problem was that AudioIO on both sides gets initialised with different buffer sizes (different devices, 240 and 512), but hardcoding 512 into both didn't help.
I also tried increasing the buffer sizes in sendto() and recvfrom() up to 4096, which made the sound more recognizable, but it was still too distorted.
I should also add that I'm a newbie in C++ and have stuck to 'naive' and 'whatever works' approaches, which got me this far.
I want to understand whether I'm on the right track and what approach I should take to transmit audio without distortion.
There are two major problems with your approach:
Blocking calls, such as networking, must be avoided in the audio processing callback. You need to perform networking (on both sides) on a different thread, and you need some buffering between the audio processing callback and the network thread to pass audio, as sketched below.
You need to "packetize" the transfers, i.e. handle network packets properly on both sides. Network transfer is neither fast nor reliable, so you need clever mechanisms to deal with this.
In general, the implementation of such audio transfer is much, much more complex than your current code.
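For illustration, a minimal sketch of the buffering mentioned above, assuming a single producer (the network thread) and a single consumer (the audio callback); the class and all names here are mine, not part of Superpowered or any other library:

#include <atomic>
#include <cstring>

// Single-producer/single-consumer lock-free ring buffer for 16-bit samples.
// The network thread writes, the audio callback reads; neither side blocks.
class AudioRingBuffer {
public:
    explicit AudioRingBuffer(size_t capacity) : size(capacity), data(new short[capacity]) {}
    ~AudioRingBuffer() { delete[] data; }

    // Network thread: store freshly received samples; drop the packet when full.
    bool write(const short *src, size_t n) {
        size_t w = writePos.load(std::memory_order_relaxed);
        size_t r = readPos.load(std::memory_order_acquire);
        if (size - (w - r) < n) return false;
        for (size_t i = 0; i < n; i++) data[(w + i) % size] = src[i];
        writePos.store(w + n, std::memory_order_release);
        return true;
    }

    // Audio callback: fetch samples; output silence on underrun instead of blocking.
    void read(short *dst, size_t n) {
        size_t r = readPos.load(std::memory_order_relaxed);
        size_t w = writePos.load(std::memory_order_acquire);
        if (w - r < n) { memset(dst, 0, n * sizeof(short)); return; }
        for (size_t i = 0; i < n; i++) dst[i] = data[(r + i) % size];
        readPos.store(r + n, std::memory_order_release);
    }

private:
    const size_t size;               // capacity in samples
    short *data;
    std::atomic<size_t> writePos{0}; // total samples ever written
    std::atomic<size_t> readPos{0};  // total samples ever read
};

The network thread calls write() after every recvfrom(); the audio callback calls read() for exactly the number of samples it has to output and gets silence on underrun. Pre-filling the buffer with a few packets before starting playback helps absorb network jitter.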
Related
I think I'm passing the SuperpoweredAndroidAudioIO buffer to the SuperpoweredRecorder's process() method incorrectly.
My process callback looks like this:
bool SuperpoweredExample::process(short int *audioIO, unsigned int numberOfSamples) {
    if (recording) {
        recordProcess(audioIO, numberOfSamples);
    }
    return true;
}
recordProcess:
void SuperpoweredExample::recordProcess(short *input, unsigned int numberOfSamples) {
    SuperpoweredShortIntToFloat(input, stereoBuffer, numberOfSamples);
    __android_log_print(ANDROID_LOG_VERBOSE, "SuperpoweredExample", "%i",
                        recorder->process(stereoBuffer, NULL, numberOfSamples));
}
SuperpoweredRecorder's process() always returns 0 so it has not yet started recording. I assume this is because I'm not passing it the input correctly.
Further relevant code:
stereoBuffer = (float *)memalign(16, (buffersize + 16) * sizeof(float) * 2);
audioSystem = new SuperpoweredAndroidAudioIO(samplerate, buffersize, true, true,
                                             audioProcessing, this, -1, SL_ANDROID_STREAM_MEDIA,
                                             buffersize * 2);
My onRecord():
void SuperpoweredExample::onRecord(bool record) {
    if (!record) {
        recording = false;
        recorder->stop();
    } else {
        recording = true;
        __android_log_print(ANDROID_LOG_VERBOSE, "SuperpoweredExample", "%s", tempPath.c_str());
        recorder->start((tempPath + "_TEMP").c_str());
    }
}
How do I get SuperpoweredRecorder to create files?
I have tried creating another buffer just for recording, but I'm having the same problem. Should I be using createWav()? The docs say to use that only for offline processing.
I added a separate buffer for recording and that seemed to work.
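For reference, a sketch of that fix under the same assumptions as the snippets above (recordBuffer is a hypothetical extra member, allocated exactly like stereoBuffer):

// In the constructor: a dedicated buffer for the recorder, so the recording
// path never shares memory with the playback conversion.
recordBuffer = (float *)memalign(16, (buffersize + 16) * sizeof(float) * 2);

// recordProcess converts into the recorder's own buffer instead of stereoBuffer:
void SuperpoweredExample::recordProcess(short *input, unsigned int numberOfSamples) {
    SuperpoweredShortIntToFloat(input, recordBuffer, numberOfSamples);
    recorder->process(recordBuffer, NULL, numberOfSamples);
}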
I have found an open-source video player for Android which uses ffmpeg to decode video.
I have a problem with the audio, which sometimes plays with stutters, although the video picture is shown well. The basic idea of the player is that audio and video are decoded in two different threads and then synchronised in a third: the video picture is shown on a SurfaceView, and the audio is passed as a byte array to an AudioTrack and played. But sometimes the sound is lost or stutters. Can anyone give me a starting point (some basic concepts)? Maybe I should change the buffer size for the AudioTrack or add some flags to it. Here is the piece of code where the AudioTrack is created.
private AudioTrack prepareAudioTrack(int sampleRateInHz,
        int numberOfChannels) {
    for (;;) {
        int channelConfig;
        if (numberOfChannels == 1) {
            channelConfig = AudioFormat.CHANNEL_OUT_MONO;
        } else if (numberOfChannels == 2) {
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
        } else if (numberOfChannels == 3) {
            channelConfig = AudioFormat.CHANNEL_OUT_FRONT_CENTER
                    | AudioFormat.CHANNEL_OUT_FRONT_RIGHT
                    | AudioFormat.CHANNEL_OUT_FRONT_LEFT;
        } else if (numberOfChannels == 4) {
            channelConfig = AudioFormat.CHANNEL_OUT_QUAD;
        } else if (numberOfChannels == 5) {
            channelConfig = AudioFormat.CHANNEL_OUT_QUAD
                    | AudioFormat.CHANNEL_OUT_LOW_FREQUENCY;
        } else if (numberOfChannels == 6) {
            channelConfig = AudioFormat.CHANNEL_OUT_5POINT1;
        } else if (numberOfChannels == 8) {
            channelConfig = AudioFormat.CHANNEL_OUT_7POINT1;
        } else {
            channelConfig = AudioFormat.CHANNEL_OUT_STEREO;
        }
        try {
            Log.d("MyLog", "Creating Audio player");
            int minBufferSize = AudioTrack.getMinBufferSize(sampleRateInHz,
                    channelConfig, AudioFormat.ENCODING_PCM_16BIT);
            AudioTrack audioTrack = new AudioTrack(
                    AudioManager.STREAM_MUSIC, sampleRateInHz,
                    channelConfig, AudioFormat.ENCODING_PCM_16BIT,
                    minBufferSize, AudioTrack.MODE_STREAM);
            return audioTrack;
        } catch (IllegalArgumentException e) {
            if (numberOfChannels > 2) {
                numberOfChannels = 2;
            } else if (numberOfChannels > 1) {
                numberOfChannels = 1;
            } else {
                throw e;
            }
        }
    }
}
And this is a piece of native code where sound bytes are written to AudioTrack
int player_write_audio(struct DecoderData *decoder_data, JNIEnv *env,
        int64_t pts, uint8_t *data, int data_size, int original_data_size) {
    struct Player *player = decoder_data->player;
    int stream_no = decoder_data->stream_no;
    int err = ERROR_NO_ERROR;
    int ret;
    AVCodecContext *c = player->input_codec_ctxs[stream_no];
    AVStream *stream = player->input_streams[stream_no];
    LOGI(10, "player_write_audio Writing audio frame")

    jbyteArray samples_byte_array = (*env)->NewByteArray(env, data_size);
    if (samples_byte_array == NULL) {
        err = -ERROR_NOT_CREATED_AUDIO_SAMPLE_BYTE_ARRAY;
        goto end;
    }

    if (pts != AV_NOPTS_VALUE) {
        player->audio_clock = av_rescale_q(pts, stream->time_base, AV_TIME_BASE_Q);
        LOGI(9, "player_write_audio - read from pts")
    } else {
        int64_t sample_time = original_data_size;
        sample_time *= 1000000ll;
        sample_time /= c->channels;
        sample_time /= c->sample_rate;
        sample_time /= av_get_bytes_per_sample(c->sample_fmt);
        player->audio_clock += sample_time;
        LOGI(9, "player_write_audio - added")
    }

    enum WaitFuncRet wait_ret = player_wait_for_frame(player,
            player->audio_clock + AUDIO_TIME_ADJUST_US, stream_no);
    if (wait_ret == WAIT_FUNC_RET_SKIP) {
        goto end;
    }

    LOGI(10, "player_write_audio Writing sample data")
    jbyte *jni_samples = (*env)->GetByteArrayElements(env, samples_byte_array, NULL);
    memcpy(jni_samples, data, data_size);
    (*env)->ReleaseByteArrayElements(env, samples_byte_array, jni_samples, 0);

    LOGI(10, "player_write_audio playing audio track");
    ret = (*env)->CallIntMethod(env, player->audio_track,
            player->audio_track_write_method, samples_byte_array, 0, data_size);
    jthrowable exc = (*env)->ExceptionOccurred(env);
    if (exc) {
        err = -ERROR_PLAYING_AUDIO;
        LOGE(3, "Could not write audio track: reason in exception");
        // TODO maybe release exc
        goto free_local_ref;
    }
    if (ret < 0) {
        err = -ERROR_PLAYING_AUDIO;
        LOGE(3,
            "Could not write audio track: reason: %d look in AudioTrack.write()", ret);
        goto free_local_ref;
    }

free_local_ref:
    LOGI(10, "player_write_audio releasing local ref");
    (*env)->DeleteLocalRef(env, samples_byte_array);
end:
    return err;
}
I would appreciate any help. Thank you very much!
I had the same problem. The issue is the starting point of the audio data written to the audio player. In PCM data, every 2 bytes form one sample, little-endian. For correct playback, the PCM samples must be assembled and written to the audio player starting on a sample boundary. If the read buffer does not start at the first byte of a sample, the samples cannot be assembled correctly and the sound is destroyed. In my situation I was reading samples from a file; sometimes the read started at the second byte of a sample, and then all the data read afterwards was decoded incorrectly. I solved the problem by checking the start offset and, if it was an odd number, incrementing it to the next even number.
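A minimal sketch of that fix for 16-bit PCM (the function names are mine): round the byte offset so every read starts on a sample boundary; for interleaved 16-bit stereo you would align to a 4-byte frame instead.

#include <cstddef>

// Move an odd byte offset to the next even one, so a read starts on a
// 16-bit sample boundary.
size_t alignToSample(size_t byteOffset) {
    return byteOffset + (byteOffset % 2);
}

// For interleaved 16-bit stereo, align to a 4-byte frame boundary instead.
size_t alignToStereoFrame(size_t byteOffset) {
    size_t rem = byteOffset % 4;
    return rem ? byteOffset + (4 - rem) : byteOffset;
}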
I'm trying to set up an OpenSL AudioPlayer to play back a WAV file from memory I've allocated. I want to do this so I can have multiple AudioPlayers that share the same data and conserve memory.
I've tried giving OpenSL the entire file and telling it that it is a WAVE with format_mime:
SLDataLocator_Address loc_fd = { SL_DATALOCATOR_ADDRESS, data, size };
SLDataFormat_MIME format_mime = { SL_DATAFORMAT_MIME, (SLchar*)"audio/x-wav", SL_CONTAINERTYPE_WAV };
SLDataSource audioSrc = { &loc_fd, &format_mime };

// configure audio sink
SLDataLocator_OutputMix loc_outmix = { SL_DATALOCATOR_OUTPUTMIX, outputMixObject };
SLDataSink audioSnk = { &loc_outmix, 0 };

// create audio player
const SLInterfaceID ids[2] = { SL_IID_SEEK, SL_IID_PLAYBACKRATE };
const SLboolean req[2] = { SL_BOOLEAN_FALSE, SL_BOOLEAN_FALSE };
result = (*engineEngine)->CreateAudioPlayer(engineEngine, &uriPlayerObject[cntSOUND],
        &audioSrc, &audioSnk, 0, ids, req);
and I have parsed the WAVE data myself and loaded format_pcm
SLDataFormat_PCM format_pcm;
format_pcm.formatType = SL_DATAFORMAT_PCM;
char* wavParser = isWAVE(data);
if (wavParser == NULL)
{
    Log("NOT A WAVE!");
    return -1;
}
char* fmtChunk = getChunk("fmt ", data, size);
parsefmtChunk(fmtChunk, &format_pcm);
char* dataChunk = getChunk("data", data, size);
dataChunk += 4;
unsigned int dataSize = *((unsigned int*)dataChunk);
dataChunk += 4;
format_pcm.channelMask = 0;
format_pcm.containerSize = 16;
format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
loc_fd.pAddress = dataChunk;
loc_fd.length = dataSize;
The parsefmtChunk function is
void parsefmtChunk(char* fmtchunk, SLDataFormat_PCM* pcm)
{
    char* data = fmtchunk + 8;
    unsigned short audioFormat = *((unsigned short*)data);
    if (audioFormat != 1)
    {
        Log("Not PCM!");
        Log("Reached Line:%d in File %s", __LINE__, __FILE__);
        return;
    }
    data += 2;
    pcm->numChannels = *((unsigned short*)data);
    data += 2;
    pcm->samplesPerSec = *((unsigned int*)data);
    data += 4;
    // Byte Rate
    data += 4;
    // Block Align
    data += 2;
    // BitsPerSample
    pcm->bitsPerSample = *((unsigned short*)data);
}
(Are Byte Rate and Block Align supposed to be used somehow to fill out the pcm struct?)
but whenever I create the AudioPlayer I get SL_RESULT_CONTENT_UNSUPPORTED.
This is what I log from my parsefmtChunk function:
Channels:2
samplesPerSec:44100
bitsPerSample:16
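(As an aside on the parenthetical question above: byte rate and block align do not map to any SLDataFormat_PCM member; per the WAV spec they are derived from the other fmt fields, so at most they can serve as a consistency check. A sketch, with hypothetical parameter names:)

// Returns true when the derived fmt-chunk fields agree with the basic ones:
//   blockAlign = numChannels * bitsPerSample / 8
//   byteRate   = sampleRate * blockAlign
static bool fmtChunkIsConsistent(unsigned int sampleRate, unsigned short numChannels,
                                 unsigned short bitsPerSample, unsigned int byteRate,
                                 unsigned short blockAlign)
{
    unsigned int expectedAlign = numChannels * bitsPerSample / 8;
    return blockAlign == expectedAlign && byteRate == sampleRate * expectedAlign;
}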
from android-ndk-r8b/docs/opensles/index.html
PCM data format
The PCM data format can be used with buffer queues only.
So SLDataFormat_PCM CANNOT be used with SLDataLocator_Address like I assumed.
I can do what I want with a Buffer Queue instead, using just one big queue, like so:
(*bufferqueueitf)->Enqueue(bufferqueueitf, dataChunk, dataSize);
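For context, here is a sketch of the player setup that leads up to that Enqueue call (playerObject and bufferqueueitf are assumed names; engineEngine, audioSnk, dataChunk and dataSize are reused from the question):

// PCM format + Android simple buffer queue locator, replacing SLDataLocator_Address.
// The values match the logged format: 2 channels, 44100 Hz, 16 bit.
SLDataLocator_AndroidSimpleBufferQueue loc_bufq = { SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, 1 };
SLDataFormat_PCM format_pcm = { SL_DATAFORMAT_PCM, 2, SL_SAMPLINGRATE_44_1,
        SL_PCMSAMPLEFORMAT_FIXED_16, SL_PCMSAMPLEFORMAT_FIXED_16,
        SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT, SL_BYTEORDER_LITTLEENDIAN };
SLDataSource audioSrc = { &loc_bufq, &format_pcm };

// Request the buffer queue interface when creating the player:
SLObjectItf playerObject;
SLAndroidSimpleBufferQueueItf bufferqueueitf;
const SLInterfaceID ids[1] = { SL_IID_ANDROIDSIMPLEBUFFERQUEUE };
const SLboolean req[1] = { SL_BOOLEAN_TRUE };
(*engineEngine)->CreateAudioPlayer(engineEngine, &playerObject, &audioSrc, &audioSnk, 1, ids, req);
(*playerObject)->Realize(playerObject, SL_BOOLEAN_FALSE);
(*playerObject)->GetInterface(playerObject, SL_IID_ANDROIDSIMPLEBUFFERQUEUE, &bufferqueueitf);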
Have you tried this?
SLDataFormat_MIME format_mime = {SL_DATAFORMAT_MIME, NULL, SL_CONTAINERTYPE_UNSPECIFIED};
The Android implementation of OpenSL ES isn't totally compliant and http://mobilepearls.com/labs/native-android-api/ndk/docs/opensles/ recommends the following:
The Android implementation of OpenSL ES requires that mimeType be initialized to either NULL or a valid UTF-8 string, and that containerType be initialized to a valid value. In the absence of other considerations, such as portability to other implementations, or content format which cannot be identified by header, we recommend that you set the mimeType to NULL and containerType to SL_CONTAINERTYPE_UNSPECIFIED.
Also, make sure you're giving it a valid URI.
I want to send an audio stream from a PC (a C++ application, using the FMOD API to decode audio data and send it via a UDP socket) to an Android device. The communication already works and I can hear "sound" (100 ms of sound, followed by 900 ms of silence, alternating) on the Android device.
I don't know why the sound is stuttering; on the PC the same audio stream plays fine in good quality. I think the problem is on the Android side.
Here is the code:
DatagramSocket sock = new DatagramSocket(12345);
byte[] bSockBuffer = new byte[1024];
byte[] bRecvBufTmp;
int iAudioBufSize, iCurAudioBufPos = 0;
sock.setReceiveBufferSize(bSockBuffer.length);

// Initialise the audio stream:
iAudioBufSize = AudioTrack.getMinBufferSize(44100, AudioFormat.CHANNEL_OUT_STEREO, AudioFormat.ENCODING_PCM_16BIT);
AudioTrack track = new AudioTrack(AudioManager.STREAM_MUSIC, 44100, AudioFormat.CHANNEL_OUT_STEREO,
        AudioFormat.ENCODING_PCM_16BIT, iAudioBufSize, AudioTrack.MODE_STREAM);
track.play();

while (true)
{
    DatagramPacket pack = new DatagramPacket(bSockBuffer, bSockBuffer.length);
    // Receive a packet:
    sock.receive(pack);
    track.write(pack.getData(), 0, pack.getLength());
}
I'm sure the AudioTrack object is set up correctly; the settings match those in the C++ application.
Another thing I tried was pre-buffering the received socket data in a temporary byte[] and writing it to the AudioTrack object once the buffer size iAudioBufSize was reached.
That did not help.
Any ideas?
Thanks
[EDIT]
Code of the C++ application, based on the "manualdecode" sample from the FMOD API examples:
FMOD_RESULT F_CALLBACK pcmreadcallback(FMOD_SOUND *sound, void *data, unsigned int datalen)
{
    CCtrlSocket *cClientTmp = /* Obtaining target client sock here */;
    FMOD_RESULT result;
    unsigned int read, uSentTmp, uSizeTmp;

    EnterCriticalSection(&decodecrit);
    if (!decodesound)
    {
        LeaveCriticalSection(&decodecrit); // don't leak the critical section on early return
        return (FMOD_ERR_FILE_EOF);
    }
    result = decodesound->readData(data, datalen, &read);
    if (result == FMOD_ERR_FILE_EOF)
    {
        // Handle looping:
        decodesound->seekData(0);
        datalen -= read;
        result = decodesound->readData((char*) data + read, datalen, &read);
    }

    // Split the package into multiple parts:
    uSentTmp = 0;
    do
    {
        uSizeTmp = (read - uSentTmp);
        if (uSizeTmp > 1024)
            uSizeTmp = 1024;
        uSentTmp += cClientTmp->SendAudioData((char*) data + uSentTmp, uSizeTmp);
    } while (uSentTmp < read);
    LeaveCriticalSection(&decodecrit);

    return (FMOD_OK);
}
I've solved this problem.
The culprit was a log-file write that cost a lot of time and created a lag. :(
Now I can hear the streamed music on my Android client, but there are still some lags. I've experimented with a LOT of values for the socket and AudioTrack buffers.
I have compared the amounts of sent and received bytes: in 20 seconds, sending 9,170,000 bytes of data results in 8,120,000 bytes received on the Android device. At first the stream plays fast for 3 seconds (which means the buffer is full?). After 30 seconds the stream lags (which means the buffer is empty?).
In general the music quality is very good, but there is a sizzling noise the whole time (which indicates lost socket packets?).
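One way to confirm the packet-loss suspicion (a sketch with a hypothetical 4-byte header, assuming little-endian platforms on both sides): prefix every datagram with a sequence number on the PC side and count the gaps on the receiver before handing the payload to the audio output.

#include <cstdint>
#include <cstring>

struct PacketHeader { uint32_t sequence; }; // hypothetical per-datagram header

// Sender: build one datagram = header + PCM payload.
size_t buildPacket(uint8_t *out, uint32_t seq, const uint8_t *payload, size_t len) {
    PacketHeader h = { seq };
    memcpy(out, &h, sizeof h);
    memcpy(out + sizeof h, payload, len);
    return sizeof h + len;
}

// Receiver: returns how many datagrams were lost since the previous one.
uint32_t countGap(const uint8_t *packet, uint32_t &lastSeq) {
    PacketHeader h;
    memcpy(&h, packet, sizeof h);
    uint32_t gap = h.sequence - (lastSeq + 1); // 0 when nothing was lost
    lastSeq = h.sequence;
    return gap;
}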
My PlaybackStart() function has changed; I'm not using a PCM read callback anymore:
FMOD_RESULT CAudioStream::PlaybackStart()
{
    CCtrlSocket *cClientTmp;
    unsigned int read, uSentTmp, uSizeTmp;
    FMOD_RESULT result;

    result = system->createStream("C:\\test.mp3", FMOD_OPENONLY | FMOD_ACCURATETIME, 0, &sound);
    if (result != FMOD_OK)
        return (result);

    int iChannels, iBits;
    FMOD_SOUND_FORMAT fFormat;
    FMOD_SOUND_TYPE fType;
    result = sound->getFormat(&fType, &fFormat, &iChannels, &iBits);
    if (result != FMOD_OK)
        return (result);

    void *data;
    unsigned int length = 0;
    int iSampleSec = 1; // Playtime
    int iSampleSize = (44100 * 2 * sizeof(signed short) * iSampleSec);
    int iSleep = 6; // Sleep after sending a package
    DWORD dSleepTotal;

    result = sound->getLength(&length, FMOD_TIMEUNIT_PCMBYTES);
    if (result != FMOD_OK)
        return (result);

    data = malloc(iSampleSize);
    if (!data)
        return (FMOD_RESULT_FORCEINT);

    cClientTmp = (CCtrlSocket*) CCtrlSocket::cServerSock.GetClientSock(CCtrlSocket::cServerSock.GetClientSockCount() - 1);
    do
    {
        result = sound->readData((char*) data, iSampleSize, &read);
        if ((result != FMOD_OK) && (result != FMOD_ERR_FILE_EOF))
            ASSERT(FALSE);
        else if (read > 0)
        {
            dSleepTotal = 0;
            for (int i = 0; i < read; i += NET_SVR_AUDIO_BUFFER)
            {
                // MIN_VAL_LIMITED ((MIN_VAL(VAL1, VAL2) <= LIMIT) ? LIMIT : MIN_VAL(VAL1, VAL2))
                cClientTmp->SendAudioData((char*) data + i, MIN_VAL_LIMITED(NET_SVR_AUDIO_BUFFER, (read - i), 0));
                // Sleep after sending every package:
                Sleep(iSleep);
                dSleepTotal += iSleep;
            }
            if (dSleepTotal < (iSampleSec * 1000))
            {
                dSleepTotal = (iSampleSec * 1000) - dSleepTotal;
                // Sleep after sending every second of playtime:
                Sleep(dSleepTotal);
            }
        }
    } while (read > 0);

    result = sound->release();
    if (result != FMOD_OK)
        return (result);
    result = system->close();
    if (result != FMOD_OK)
        return (result);
    result = system->release();
    if (result != FMOD_OK)
        return (result);

    return (result);
}
I have experimented with different sleep timings, too.
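The fast-start-then-lag pattern suggests the Sleep()-based pacing drifts from the real playback rate. A sketch of pacing against a monotonic clock instead (assuming 44100 Hz, 16-bit stereo, i.e. 176400 bytes per second; the function name is mine):

#include <windows.h>

// After `totalSent` bytes have been sent since `startTicks`, sleep only as long
// as needed for the playback clock to catch up, so sleep errors can't accumulate.
void PaceSender(DWORD startTicks, unsigned long long totalSent) {
    const double bytesPerMs = 44100.0 * 2 /*channels*/ * 2 /*bytes per sample*/ / 1000.0;
    DWORD targetMs = (DWORD)((double)totalSent / bytesPerMs);
    DWORD elapsedMs = GetTickCount() - startTicks;
    if (targetMs > elapsedMs)
        Sleep(targetMs - elapsedMs);
}

Calling this after every SendAudioData(), instead of the fixed Sleep(iSleep), keeps the sender locked to the consumption rate regardless of how long the sends themselves take.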
I have changed the default media framework in Android from Stagefright to Gstreamer, to make it flexible for our project.
But when I run some APKs, all the sounds of the app are played at app start, and nothing plays after that; SoundPool shows the error "sample # not READY". For example, in a baby-piano app, the piano syllable sounds are played when I start the application, and when I actually tap the piano after entering play mode, nothing is played.
The problem, I think, is that when the sounds are loaded into the SoundPool, the Gstreamer media player object is created and played, and this is done at app start.
The log shows "Sample channel count (0) out of range". It comes from the section of SoundPool.cpp below.
status_t Sample::doLoad() {
    uint32_t sampleRate;
    int numChannels;
    int format;
    sp<IMemory> p;

    LOGW("Start decode");
    if (mUrl) {
        p = MediaPlayer::decode(mUrl, &sampleRate, &numChannels, &format);
    } else {
        p = MediaPlayer::decode(mFd, mOffset, mLength, &sampleRate, &numChannels, &format);
        LOGW("close(%d)", mFd);
        ::close(mFd);
        mFd = -1;
    }
    if (p == 0) {
        LOGE("Unable to load sample: %s", mUrl);
        return -1;
    }
    LOGW("pointer = %p, size = %u, sampleRate = %u, numChannels = %d",
            p->pointer(), p->size(), sampleRate, numChannels);
    if (sampleRate > kMaxSampleRate) {
        LOGE("Sample rate (%u) out of range", sampleRate);
        return -1;
    }
    if ((numChannels < 1) || (numChannels > 2)) {
        LOGE("Sample channel count (%d) out of range", numChannels);
        return -1;
    }
    //_dumpBuffer(p->pointer(), p->size());
    uint8_t* q = static_cast<uint8_t*>(p->pointer()) + p->size() - 10;
    //_dumpBuffer(q, 10, 10, false);
    mData = p;
    mSize = p->size();
    mSampleRate = sampleRate;
    mNumChannels = numChannels;
    mFormat = format;
    mState = READY;
    return 0;
}
and the MediaPlayerService::decode function, shown below, returns zero for all of these values:
sp<IMemory> MediaPlayerService::decode(int fd, int64_t offset, int64_t length, uint32_t *pSampleRate, int* pNumChannels, int* pFormat)
{
    LOGD("decode(%d, %lld, %lld)", fd, offset, length);
    sp<MemoryBase> mem;
    sp<MediaPlayerBase> player;

    player_type playerType = getPlayerType(fd, offset, length);
    LOGD("player type = %d", playerType);

    // create the right type of player
    sp<AudioCache> cache = new AudioCache("decode_fd");
    player = android::createPlayer(playerType, cache.get(), cache->notify);
    if (player == NULL) goto Exit;
    if (player->hardwareOutput()) goto Exit;

    static_cast<MediaPlayerInterface*>(player.get())->setAudioSink(cache);

    // set data source
    if (player->setDataSource(fd, offset, length) != NO_ERROR) goto Exit;

    LOGD("prepare");
    player->prepareAsync();

    LOGD("wait for prepare");
    if (cache->wait() != NO_ERROR) goto Exit;

    LOGD("start");
    player->start();

    LOGD("wait for playback complete");
    if (cache->wait() != NO_ERROR) goto Exit;

    mem = new MemoryBase(cache->getHeap(), 0, cache->size());
    *pSampleRate = cache->sampleRate();
    *pNumChannels = cache->channelCount();
    *pFormat = cache->format();
    LOGD("return memory @ %p, sampleRate=%u, channelCount = %d, format = %d", mem->pointer(), *pSampleRate, *pNumChannels, *pFormat);

Exit:
    if (player != 0) player->reset();
    ::close(fd);
    return mem;
}
The sample rate, channel count, etc. are all returned as 0 from this function.
After this, when the samples are played, it shows the error "sample # not READY":
int SoundPool::play(int sampleID, float leftVolume, float rightVolume,
        int priority, int loop, float rate)
{
    LOGW("sampleID=%d, leftVolume=%f, rightVolume=%f, priority=%d, loop=%d, rate=%f",
            sampleID, leftVolume, rightVolume, priority, loop, rate);
    sp<Sample> sample;
    SoundChannel* channel;
    int channelID;

    // scope for lock
    {
        Mutex::Autolock lock(&mLock);

        // is sample ready?
        sample = findSample(sampleID);
        if ((sample == 0) || (sample->state() != Sample::READY)) {
            LOGW("  sample %d not READY", sampleID);
            return 0;
        }

        dump();

        // allocate a channel
        channel = allocateChannel(priority);

        // no channel allocated - return 0
        if (!channel) {
            LOGW("No channel allocated");
            return 0;
        }

        channelID = ++mNextChannelID;
    }

    LOGW("channel state = %d", channel->state());
    channel->play(sample, channelID, leftVolume, rightVolume, priority, loop, rate);
    return channelID;
}
Is there a solution to this problem? Please help.
You can also use this method:

public void loadSound(String strSound, int stream) {
    mSoundPool.setOnLoadCompleteListener(new OnLoadCompleteListener() {
        @Override
        public void onLoadComplete(SoundPool soundPool, int sampleId,
                int status) {
            mSoundPool.play(sampleId, streamVolume, streamVolume, 1, LOOP_1_TIME, 1f);
        }
    });
    try {
        stream = mSoundPool.load(aMan.openFd(strSound), 1);
    } catch (IOException e) {
        e.printStackTrace();
    }
}
"Sample not ready" usually indicates that the sample hasn't been loaded yet, i.e. it's still loading. (Perhaps the framework you switched to takes longer to load it.)
You should subscribe to its OnLoadCompleteListener; when you receive the callback, the sound is ready to be played. Before that point, it won't be playable.
I had the same problem on Android 2.0 and solved it by using the .ogg format instead of .mp3 for the sounds I use, as mentioned here. I hope this solves your problem.