I believe the major problem comes from the read method in MicrophoneManager, but I can't see where the problem is. My console output for bytesRead is 0 (this is in the other class, AudioTransmitter). It seems to me like it's not streaming audio data, since the data sent off never changes and, as I said, bytesRead is 0.
public class MicrophoneManager{
// private TargetDataLine targetDataLine;
private float sampleRate = 8000.0F; //8000,11025,16000,22050,44100
private int sampleSizeInBits = 16; //8,16
private int channels = 1; //1,2
private boolean signed = true; //true,false
private boolean bigEndian = false; //true,false
private AudioFormat audioFormat;
// private AudioRecord audioRecord;
// private AudioInputStream ais;
private static MicrophoneManager singletonMicrophoneManager = null;
public AudioRecord audioRecord;
public int mSamplesRead; //how many samples read
public int buffersizebytes;
public int buflen;
public int channelConfiguration = AudioFormat.CHANNEL_CONFIGURATION_MONO;
public int audioEncoding = AudioFormat.ENCODING_PCM_16BIT;
public static short[] buffer; //+-32767
public static final int SAMPPERSEC = 8000; //samp per sec 8000, 11025, 22050 44100 or 48000
public MicrophoneManager() {
System.out.println("Initializing");
// audioFormat = new AudioFormat(sampleRate,sampleSizeInBits,channels,signed,bigEndian);
// audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT, (int) sampleRate, channels, AudioFormat.ENCODING_PCM_16BIT, buffersizebytes);
buffersizebytes = AudioRecord.getMinBufferSize(SAMPPERSEC,channelConfiguration,audioEncoding); //4096 on ion
buffer = new short[buffersizebytes];
buflen=buffersizebytes/2;
audioRecord = new AudioRecord(android.media.MediaRecorder.AudioSource.MIC,SAMPPERSEC,
channelConfiguration,audioEncoding,buffersizebytes); //constructor
}
public static MicrophoneManager getMicrophoneManager() throws Exception {
if (singletonMicrophoneManager == null) {
singletonMicrophoneManager = new MicrophoneManager();
singletonMicrophoneManager.initialize();
}
return singletonMicrophoneManager;
}
public void initialize() throws Exception {
}
public void startAudioInput(){
try {
audioRecord.startRecording();
mSamplesRead = audioRecord.read(buffer, 0, buffer.length);
audioRecord.stop();
} catch (Throwable t) {
// Log.e("AudioRecord", "Recording Failed");
System.out.println("Error Starting audio input"+t);
}
}
public void stopAudioInput(){
audioRecord.stop();
System.out.println("Stopping audio input");
}
public void finishAudioInput(){
audioRecord.release();
System.out.println("Finishing audio input");
}
public boolean available() throws Exception {
return true;
}
public int read(byte[] inBuf) throws Exception {
return audioRecord.read(inBuf,0,inBuf.length);
}
}
AudioTransmitter:
public class AudioTransmitter extends Thread{
private MicrophoneManager mm=null;
private boolean transmittingAudio = false;
private String host;
private int port;
private long id=0;
boolean run=true;
public AudioTransmitter(String host, int port, long id) {
this.host = host;
this.port = port;
this.id = id;
this.start();
}
public void run() {
System.out.println("creating audio transmitter host "+host+" port "+port+" id "+id);
TrustManager[] trustAllCerts = new TrustManager[]{
new X509TrustManager() {
public java.security.cert.X509Certificate[] getAcceptedIssuers() {
return null;
}
public void checkClientTrusted(
java.security.cert.X509Certificate[] certs, String authType) {
}
public void checkServerTrusted(
java.security.cert.X509Certificate[] chain, String authType) {
for (int j=0; j<chain.length; j++)
{
System.out.println("Client certificate information:");
System.out.println(" Subject DN: " + chain[j].getSubjectDN());
System.out.println(" Issuer DN: " + chain[j].getIssuerDN());
System.out.println(" Serial number: " + chain[j].getSerialNumber());
System.out.println("");
}
}
}
};
while (run) {
if(transmittingAudio) {
try {
if(mm==null) {
mm = new MicrophoneManager();
// mm.initialize();
}
SSLContext sc = SSLContext.getInstance("SSL");
sc.init(null, trustAllCerts, new java.security.SecureRandom());
SSLSocketFactory sslFact = sc.getSocketFactory();
SSLSocket socket = (SSLSocket)sslFact.createSocket(host, port);
socket.setSoTimeout(10000);
InputStream inputStream = socket.getInputStream();
DataInputStream in = new DataInputStream(new BufferedInputStream(inputStream));
OutputStream outputStream = socket.getOutputStream();
DataOutputStream os = new DataOutputStream(new BufferedOutputStream(outputStream));
PrintWriter socketPrinter = new PrintWriter(os);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
// socketPrinter.println("POST /transmitaudio?patient=1333369798370 HTTP/1.0");
socketPrinter.println("POST /transmitaudio?id="+id+" HTTP/1.0");
socketPrinter.println("Content-Type: audio/basic");
socketPrinter.println("Content-Length: 99999");
socketPrinter.println("Connection: Keep-Alive");
socketPrinter.println("Cache-Control: no-cache");
socketPrinter.println();
socketPrinter.flush();
// in.read();
mm.startAudioInput();
int buffersizebytes = AudioRecord.getMinBufferSize(8000, AudioFormat.CHANNEL_CONFIGURATION_MONO,AudioFormat.ENCODING_PCM_16BIT); //4096 on ion
System.out.println("audio started");
byte[] data = new byte[buffersizebytes];
while(transmittingAudio) {
// byte[] data = new byte[mm.available()];
int bytesRead = mm.read(data);
os.write(data,0,bytesRead);
os.flush();
// ca.transmitAxisAudioPacket(data);
// System.out.println("read "+data);
System.out.println("bytesRead "+bytesRead);
System.out.println("data "+Arrays.toString(data));
}
os.close();
mm.stopAudioInput();
} catch (Exception e) {
System.out.println("excpetion while transmitting audio connection will be closed"+e);
transmittingAudio=false;
}
}
else {
try {
Thread.sleep(1000);
} catch (Exception e){
System.out.println("exception while thread sleeping"+e);}
}
}
}
public void setTransmittingAudio(boolean transmittingAudio) {
this.transmittingAudio = transmittingAudio;
}
public void finished() {
this.transmittingAudio = false;
mm.finishAudioInput();
}
}
You are calling
mSamplesRead = audioRecord.read(buffer, 0, buffersizebytes);
There are a few problems with that.
audioRecord.read() wants the length of the array you are reading into, not the size in bytes. You have set your encoding to 16 bit. You should really be doing something like:
short[] buffer = new short[1024]; // or whatever length you like
mSamplesRead = audioRecord.read(buffer, 0, buffer.length);
You are calling read with buffersizebytes, but you set buffer = new byte[1024]; there is no particular reason to think buffersizebytes is the right number. You want a short[] array with 16 bit encoding, and you want the number of SAMPLES (not bytes) you read to be less than or equal to the length of that short[] buffer.
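For reference, a minimal sketch of a corrected allocation (parameter values assumed; note that getMinBufferSize() reports bytes, not samples):
int minBytes = AudioRecord.getMinBufferSize(8000,
        AudioFormat.CHANNEL_CONFIGURATION_MONO, AudioFormat.ENCODING_PCM_16BIT);
short[] buffer = new short[minBytes / 2];      // 16-bit PCM: two bytes per sample
int samplesRead = audioRecord.read(buffer, 0, buffer.length); // returns a sample count, not bytes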
Also, you will be in better shape if you print out exceptions when they are thrown. Change
System.out.println("Error Starting audio input");
to
System.out.println("Error Starting audio input" + t);
and you will at least have a hint as to why Android is throwing you in the dustbin.
Thank you guys for your help, however I found a better way to do what's needed (here). Here is the code. Notice this also uses http://www.devdaily.com/java/jwarehouse/android/core/java/android/speech/srec/UlawEncoderInputStream.java.shtml to convert the audio to ulaw.
public class AudioWorker extends Thread
{
private boolean stopped = false;
private String host;
private int port;
private long id=0;
boolean run=true;
AudioRecord recorder;
//ulaw encoder stuff
private final static String TAG = "UlawEncoderInputStream";
private final static int MAX_ULAW = 8192;
private final static int SCALE_BITS = 16;
private InputStream mIn;
private int mMax = 0;
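// note: mMax is never updated in this class, so encode() falls back to its MAX_ULAW default scaling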
private final byte[] mBuf = new byte[1024];
private int mBufCount = 0; // should be 0 or 1
private final byte[] mOneByte = new byte[1];
////
/**
* Give the thread high priority so that it's not canceled unexpectedly, and start it
*/
public AudioWorker(String host, int port, long id)
{
this.host = host;
this.port = port;
this.id = id;
android.os.Process.setThreadPriority(android.os.Process.THREAD_PRIORITY_URGENT_AUDIO);
// start();
}
@Override
public void run()
{
Log.i("AudioWorker", "Running AudioWorker Thread");
recorder = null;
AudioTrack track = null;
short[][] buffers = new short[256][160];
int ix = 0;
/*
* Initialize buffer to hold continuously recorded AudioWorker data, start recording, and start
* playback.
*/
try
{
int N = AudioRecord.getMinBufferSize(8000,AudioFormat.CHANNEL_IN_MONO,AudioFormat.ENCODING_PCM_16BIT);
recorder = new AudioRecord(AudioSource.MIC, 8000, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, N*10);
track = new AudioTrack(AudioManager.STREAM_MUSIC, 8000,
AudioFormat.CHANNEL_OUT_MONO, AudioFormat.ENCODING_PCM_16BIT, N*10, AudioTrack.MODE_STREAM);
recorder.startRecording();
track.play();
/*
* Loops until something outside of this thread stops it.
* Reads the data from the recorder and writes it to the AudioWorker track for playback.
*/
SSLContext sc = SSLContext.getInstance("SSL");
sc.init(null, trustAllCerts, new java.security.SecureRandom());
SSLSocketFactory sslFact = sc.getSocketFactory();
SSLSocket socket = (SSLSocket)sslFact.createSocket(host, port);
socket.setSoTimeout(10000);
InputStream inputStream = socket.getInputStream();
DataInputStream in = new DataInputStream(new BufferedInputStream(inputStream));
OutputStream outputStream = socket.getOutputStream();
DataOutputStream os = new DataOutputStream(new BufferedOutputStream(outputStream));
PrintWriter socketPrinter = new PrintWriter(os);
BufferedReader br = new BufferedReader(new InputStreamReader(in));
// socketPrinter.println("POST /transmitaudio?patient=1333369798370 HTTP/1.0");
socketPrinter.println("POST /transmitaudio?id="+id+" HTTP/1.0");
socketPrinter.println("Content-Type: AudioWorker/basic");
socketPrinter.println("Content-Length: 99999");
socketPrinter.println("Connection: Keep-Alive");
socketPrinter.println("Cache-Control: no-cache");
socketPrinter.println();
socketPrinter.flush();
while(!stopped)
{
Log.i("Map", "Writing new data to buffer");
short[] buffer = buffers[ix++ % buffers.length];
N = recorder.read(buffer,0,buffer.length);
track.write(buffer, 0, buffer.length);
byte[] bytes2 = new byte[buffer.length * 2];
ByteBuffer.wrap(bytes2).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().put(buffer);
read(bytes2, 0, bytes2.length);
// os.write(bytes2,0,bytes2.length);
os.write(bytes2,0,bytes2.length);
System.out.println("bytesRead "+buffer.length);
System.out.println("data "+Arrays.toString(buffer));
}
os.close();
}
catch(Throwable x)
{
Log.w("AudioWorker", "Error reading voice AudioWorker", x);
}
/*
* Frees the thread's resources after the loop completes so that it can be run again
*/
finally
{
recorder.stop();
recorder.release();
track.stop();
track.release();
}
}
/**
* Called from outside of the thread in order to stop the recording/playback loop
*/
public void close()
{
stopped = true;
}
public void resumeThread()
{
stopped = false;
run();
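// note: this calls run() directly on the caller's thread; a finished java.lang.Thread cannot be restarted with start()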
}
TrustManager[] trustAllCerts = new TrustManager[]{
new X509TrustManager() {
public java.security.cert.X509Certificate[] getAcceptedIssuers() {
return null;
}
public void checkClientTrusted(
java.security.cert.X509Certificate[] certs, String authType) {
}
public void checkServerTrusted(
java.security.cert.X509Certificate[] chain, String authType) {
for (int j=0; j<chain.length; j++)
{
System.out.println("Client certificate information:");
System.out.println(" Subject DN: " + chain[j].getSubjectDN());
System.out.println(" Issuer DN: " + chain[j].getIssuerDN());
System.out.println(" Serial number: " + chain[j].getSerialNumber());
System.out.println("");
}
}
}
};
public static void encode(byte[] pcmBuf, int pcmOffset,
byte[] ulawBuf, int ulawOffset, int length, int max) {
// from 'ulaw' in wikipedia
// +8191 to +8159 0x80
// +8158 to +4063 in 16 intervals of 256 0x80 + interval number
// +4062 to +2015 in 16 intervals of 128 0x90 + interval number
// +2014 to +991 in 16 intervals of 64 0xA0 + interval number
// +990 to +479 in 16 intervals of 32 0xB0 + interval number
// +478 to +223 in 16 intervals of 16 0xC0 + interval number
// +222 to +95 in 16 intervals of 8 0xD0 + interval number
// +94 to +31 in 16 intervals of 4 0xE0 + interval number
// +30 to +1 in 15 intervals of 2 0xF0 + interval number
// 0 0xFF
// -1 0x7F
// -31 to -2 in 15 intervals of 2 0x70 + interval number
// -95 to -32 in 16 intervals of 4 0x60 + interval number
// -223 to -96 in 16 intervals of 8 0x50 + interval number
// -479 to -224 in 16 intervals of 16 0x40 + interval number
// -991 to -480 in 16 intervals of 32 0x30 + interval number
// -2015 to -992 in 16 intervals of 64 0x20 + interval number
// -4063 to -2016 in 16 intervals of 128 0x10 + interval number
// -8159 to -4064 in 16 intervals of 256 0x00 + interval number
// -8192 to -8160 0x00
// set scale factors
if (max <= 0) max = MAX_ULAW;
int coef = MAX_ULAW * (1 << SCALE_BITS) / max;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[pcmOffset++]) + (pcmBuf[pcmOffset++] << 8);
pcm = (pcm * coef) >> SCALE_BITS;
int ulaw;
if (pcm >= 0) {
ulaw = pcm <= 0 ? 0xff :
pcm <= 30 ? 0xf0 + (( 30 - pcm) >> 1) :
pcm <= 94 ? 0xe0 + (( 94 - pcm) >> 2) :
pcm <= 222 ? 0xd0 + (( 222 - pcm) >> 3) :
pcm <= 478 ? 0xc0 + (( 478 - pcm) >> 4) :
pcm <= 990 ? 0xb0 + (( 990 - pcm) >> 5) :
pcm <= 2014 ? 0xa0 + ((2014 - pcm) >> 6) :
pcm <= 4062 ? 0x90 + ((4062 - pcm) >> 7) :
pcm <= 8158 ? 0x80 + ((8158 - pcm) >> 8) :
0x80;
} else {
ulaw = -1 <= pcm ? 0x7f :
-31 <= pcm ? 0x70 + ((pcm - -31) >> 1) :
-95 <= pcm ? 0x60 + ((pcm - -95) >> 2) :
-223 <= pcm ? 0x50 + ((pcm - -223) >> 3) :
-479 <= pcm ? 0x40 + ((pcm - -479) >> 4) :
-991 <= pcm ? 0x30 + ((pcm - -991) >> 5) :
-2015 <= pcm ? 0x20 + ((pcm - -2015) >> 6) :
-4063 <= pcm ? 0x10 + ((pcm - -4063) >> 7) :
-8159 <= pcm ? 0x00 + ((pcm - -8159) >> 8) :
0x00;
}
ulawBuf[ulawOffset++] = (byte)ulaw;
}
}
public static int maxAbsPcm(byte[] pcmBuf, int offset, int length) {
int max = 0;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[offset++]) + (pcmBuf[offset++] << 8);
if (pcm < 0) pcm = -pcm;
if (pcm > max) max = pcm;
}
return max;
}
public int read(byte[] buf, int offset, int length) throws IOException {
if (recorder == null) throw new IllegalStateException("not open");
// return at least one byte, but try to fill 'length'
while (mBufCount < 2) {
int n = recorder.read(mBuf, mBufCount, Math.min(length * 2, mBuf.length - mBufCount));
if (n == -1) return -1;
mBufCount += n;
}
// compand data
int n = Math.min(mBufCount / 2, length);
encode(mBuf, 0, buf, offset, n, mMax);
// move data to bottom of mBuf
mBufCount -= n * 2;
for (int i = 0; i < mBufCount; i++) mBuf[i] = mBuf[i + n * 2];
return n;
}
}
I'm trying to generate a wav file with ulaw encoding from raw pcm data.
I have tried some of the solutions found on Google and here, but I can't seem to make the audio nicely playable. It's noisy, with a sort of screeching sound. Maybe the audio is playing too fast, I'm not sure.
So, here is the code that I have tried so far.
My AudioRecorder.java handles recording and saving that data as a raw pcm file, converting the raw pcm data to ulaw-encoded data, creating the wav header, and then combining both streams of data into one .wav file.
public class AudioRecorder {
private static final String TAG = "AudioRecorder";
private int audioInput = MediaRecorder.AudioSource.MIC;
private int audioSampleRate = 8000; //frequency which ranges from 8K Hz to 44.1K Hz
private int audioChannel = AudioFormat.CHANNEL_IN_MONO;
private int audioEncode = AudioFormat.ENCODING_PCM_16BIT;
private int bufferSizeInBytes = 0;
private AudioRecord audioRecord;
private Status status = Status.STATUS_NO_READY;
protected String pcmFileName;
private int currentPosition = 0;
private int lastVolumn = 0;
private FileOutputStream fosPcm = null;
public AudioRecorder() {
pcmFileName = AudioFileUtils.getPcmFileAbsolutePath(RECORDED_FILE_NAME);
status = Status.STATUS_READY;
}
public void setAudioInput(int audioInput) {
this.audioInput = audioInput;
}
public void setAudioSampleRate(int audioSampleRate) {
this.audioSampleRate = audioSampleRate;
}
public void setAudioChannel(int audioChannel) {
this.audioChannel = audioChannel;
}
/**
* This method is to start recording using AudioRecord, also has NoiseSuppressor and AutomaticGainControl enabled
*/
public void startRecord() {
bufferSizeInBytes = AudioRecord.getMinBufferSize(audioSampleRate,
audioChannel, audioEncode);
audioRecord = new AudioRecord(audioInput, audioSampleRate, audioChannel, audioEncode, bufferSizeInBytes);
if (status == Status.STATUS_NO_READY) {
throw new IllegalStateException("not init");
}
if (status == Status.STATUS_START) {
throw new IllegalStateException("is recording ");
}
Log.d("AudioRecorder", "===startRecord===" + audioRecord.getState());
audioRecord.startRecording();
new Thread(new Runnable() {
@Override
public void run() {
NoiseSuppressor noiseSuppressor = NoiseSuppressor.create(audioRecord.getAudioSessionId());
if (noiseSuppressor != null) {
noiseSuppressor.setEnabled(true);
}
AutomaticGainControl automaticGainControl = AutomaticGainControl.create(audioRecord.getAudioSessionId());
if (automaticGainControl != null) {
automaticGainControl.setEnabled(true);
}
recordToFile();
}
}).start();
}
public void stop() {
if (status != Status.STATUS_START) {
throw new IllegalStateException("not recording");
} else {
stopRecorder();
// convertPCmFile();
makeDestFile();
status = Status.STATUS_READY;
}
}
private void convertPCmFile() {
File file = new File(AudioFileUtils.getPcmFileAbsolutePath(RECORDED_GREETING_FILE_NAME)); // for ex. path= "/sdcard/samplesound.pcm" or "/sdcard/samplesound.wav"
byte[] byteData = new byte[(int) file.length()];
try {
FileInputStream in = new FileInputStream(file);
in.read(byteData);
in.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
// Set and push to audio track..
int intSize = android.media.AudioTrack.getMinBufferSize(audioSampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT);
AudioTrack at = new AudioTrack(AudioManager.STREAM_MUSIC, audioSampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT, intSize, AudioTrack.MODE_STREAM);
if (at != null) {
at.play();
// Write the byte array to the track
at.write(byteData, 0, byteData.length);
at.stop();
at.release();
} else
Log.d("TCAudio", "audio track is not initialised ");
}
private void makeDestFile() {
new Thread() {
@Override
public void run() {
File file = new File(pcmFileName);
try {
//Step 1: create input stream from generated pcm audio data
byte[] pcmData = new byte[(int) file.length()];
FileInputStream inputStream1 = new FileInputStream(file);
int readBytes = inputStream1.read(pcmData);
inputStream1.close();
//Step 2: compute the scaling value for the encoder: the max abs pcm sample (the length argument is a sample count, i.e. half the number of pcm bytes)
int size = UlawEncoderInputStream.maxAbsPcm(pcmData, 0, pcmData.length / 2);
//Step 3: send the input stream as well as the size to the constructor
FileInputStream inputStream2 = new FileInputStream(file);
UlawEncoderInputStream ulawEncoderInputStream = new UlawEncoderInputStream(inputStream2, size);
//Step 4: create byte[] with size of half of bytes of pcm audio data
byte[] ulawData = new byte[pcmData.length / 2];
//Step 5: call read from UlawEncoderInputStream with above pcmData which is newly created
int nRead;
nRead = ulawEncoderInputStream.read(ulawData);
//Step 6: create wav header
byte[] wavHeader = wavFileHeader(ulawData.length, ulawData.length + 36, audioSampleRate, audioChannel, audioSampleRate);
//Step 7: combine wav header and encodedUlawBuffer in one byte[]
byte[] allByteArray = new byte[wavHeader.length + ulawData.length];
ByteBuffer buff = ByteBuffer.wrap(allByteArray);
buff.put(wavHeader);
buff.put(ulawData);
//Step 8 : writing the combined data into a new file
OutputStream outputStream = new FileOutputStream(new File(AudioFileUtils.getWavFileAbsolutePath(RECORDED_FILE_NAME)));
outputStream.write(allByteArray);
outputStream.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
releaseRecorder();
}
}.run();
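// note: .run() executes the work synchronously on the current thread; use .start() to run it in the background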
}
private byte[] wavFileHeader(long totalAudioLen, long totalDataLen, long longSampleRate,
int channels, long byteRate) {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 7; // format = 7, for ulaw
header[21] = 0;
header[22] = (byte) (channels & 0xff);
header[23] = (byte) ((channels >> 8) & 0xFF);
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) ((channels * 8) / 8); // block align
header[33] = 0;
header[34] = 8; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
return header;
}
public void release() {
stopRecorder();
releaseRecorder();
status = Status.STATUS_READY;
}
private void releaseRecorder() {
if (audioRecord != null) {
audioRecord.release();
audioRecord = null;
}
}
private void stopRecorder() {
if (audioRecord != null) {
try {
audioRecord.stop();
} catch (Exception e) {
e.printStackTrace();
}
}
}
/**
* linear PCM data recorded to file
*/
private void recordToFile() {
byte[] audiodata = new byte[bufferSizeInBytes];
int readsize = 0;
try {
fosPcm = new FileOutputStream(pcmFileName, true);
} catch (FileNotFoundException e) {
Log.e("AudioRecorder", e.getMessage());
}
status = Status.STATUS_START;
while (status == Status.STATUS_START && audioRecord != null) {
readsize = audioRecord.read(audiodata, 0, bufferSizeInBytes);
if (AudioRecord.ERROR_INVALID_OPERATION != readsize && fosPcm != null) {
try {
//get the volume 1--10
int sum = 0;
for (int i = 0; i < readsize; i++) {
sum += Math.abs(audiodata[i]);
}
if (readsize > 0) {
int raw = sum / readsize;
lastVolumn = raw > 32 ? raw - 32 : 0;
Log.i(TAG, "writeDataTOFile: volumn -- " + raw + " / lastvolumn -- " + lastVolumn);
}
if (readsize > 0 && readsize <= audiodata.length)
fosPcm.write(audiodata, 0, readsize);
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
}
}
try {
if (fosPcm != null) {
fosPcm.close();
}
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
}
public Status getStatus() {
return status;
}
public enum Status {
STATUS_NO_READY,
STATUS_READY,
STATUS_START,
STATUS_PAUSE,
STATUS_STOP
}
}
I have AudioFileUtils.java that just provides the path and appends the extension when saving:
public class AudioFileUtils {
private static String rootPath = "audiorecord";
private final static String AUDIO_PCM_BASEPATH = "/" + rootPath + "/pcm/";
private final static String AUDIO_WAV_BASEPATH = "/" + rootPath + "/wav/";
private static void setRootPath(String rootPath) {
AudioFileUtils.rootPath = rootPath;
}
public static String getPcmFileAbsolutePath(String fileName) {
if (TextUtils.isEmpty(fileName)) {
throw new NullPointerException("fileName isEmpty");
}
String mAudioRawPath = "";
if (!fileName.endsWith(".pcm")) {
fileName = fileName + ".pcm";
}
String fileBasePath = SampleApp.getInstance().getApplicationContext().getExternalFilesDir(null) + AUDIO_PCM_BASEPATH;
File file = new File(fileBasePath);
if (!file.exists()) {
file.mkdirs();
}
mAudioRawPath = fileBasePath + fileName;
return mAudioRawPath;
}
public static String getWavFileAbsolutePath(String fileName) {
if (fileName == null) {
throw new NullPointerException("fileName can't be null");
}
String mAudioWavPath = "";
if (!fileName.endsWith(".wav")) {
fileName = fileName + ".wav";
}
String fileBasePath = SampleApp.getInstance().getApplicationContext().getExternalFilesDir(null) + AUDIO_WAV_BASEPATH;
CoxApplication.getInstance().getAccountManager().setMessageFilePath(fileBasePath);
File file = new File(fileBasePath);
if (!file.exists()) {
file.mkdirs();
}
mAudioWavPath = fileBasePath + fileName;
return mAudioWavPath;
}
}
I am using UlawEncoderInputStream.java, which I found here with some changes to it; it can be found below:
public class UlawEncoderInputStream extends InputStream {
private final static String TAG = "UlawEncoderInputStream";
private final static int MAX_ULAW = 8192;
private final static int SCALE_BITS = 16;
private InputStream mIn;
private int mMax = 0;
// this buffer needs to be LARGER than the largest possible file size for
// a 30 second PCM file recorded at either 8000 Hz or 44.1 KHz.
// If it is smaller, the file will be cut off.
private final byte[] mBuf = new byte[1048576];
private int mBufCount = 0; // should be 0 or 1
private final byte[] mOneByte = new byte[1];
public static void encode(byte[] pcmBuf, int pcmOffset,
byte[] ulawBuf, int ulawOffset, int length, int max) {
// from 'ulaw' in wikipedia
// +8191 to +8159 0x80
// +8158 to +4063 in 16 intervals of 256 0x80 + interval number
// +4062 to +2015 in 16 intervals of 128 0x90 + interval number
// +2014 to +991 in 16 intervals of 64 0xA0 + interval number
// +990 to +479 in 16 intervals of 32 0xB0 + interval number
// +478 to +223 in 16 intervals of 16 0xC0 + interval number
// +222 to +95 in 16 intervals of 8 0xD0 + interval number
// +94 to +31 in 16 intervals of 4 0xE0 + interval number
// +30 to +1 in 15 intervals of 2 0xF0 + interval number
// 0 0xFF
// -1 0x7F
// -31 to -2 in 15 intervals of 2 0x70 + interval number
// -95 to -32 in 16 intervals of 4 0x60 + interval number
// -223 to -96 in 16 intervals of 8 0x50 + interval number
// -479 to -224 in 16 intervals of 16 0x40 + interval number
// -991 to -480 in 16 intervals of 32 0x30 + interval number
// -2015 to -992 in 16 intervals of 64 0x20 + interval number
// -4063 to -2016 in 16 intervals of 128 0x10 + interval number
// -8159 to -4064 in 16 intervals of 256 0x00 + interval number
// -8192 to -8160 0x00
// set scale factors
if (max <= 0) max = MAX_ULAW;
int coef = MAX_ULAW * (1 << SCALE_BITS) / max;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[pcmOffset++]) + (pcmBuf[pcmOffset++] << 8);
pcm = (pcm * coef) >> SCALE_BITS;
int ulaw;
if (pcm >= 0) {
ulaw = pcm <= 0 ? 0xff :
pcm <= 30 ? 0xf0 + (( 30 - pcm) >> 1) :
pcm <= 94 ? 0xe0 + (( 94 - pcm) >> 2) :
pcm <= 222 ? 0xd0 + (( 222 - pcm) >> 3) :
pcm <= 478 ? 0xc0 + (( 478 - pcm) >> 4) :
pcm <= 990 ? 0xb0 + (( 990 - pcm) >> 5) :
pcm <= 2014 ? 0xa0 + ((2014 - pcm) >> 6) :
pcm <= 4062 ? 0x90 + ((4062 - pcm) >> 7) :
pcm <= 8158 ? 0x80 + ((8158 - pcm) >> 8) :
0x80;
} else {
ulaw = -1 <= pcm ? 0x7f :
-31 <= pcm ? 0x70 + ((pcm - -31) >> 1) :
-95 <= pcm ? 0x60 + ((pcm - -95) >> 2) :
-223 <= pcm ? 0x50 + ((pcm - -223) >> 3) :
-479 <= pcm ? 0x40 + ((pcm - -479) >> 4) :
-991 <= pcm ? 0x30 + ((pcm - -991) >> 5) :
-2015 <= pcm ? 0x20 + ((pcm - -2015) >> 6) :
-4063 <= pcm ? 0x10 + ((pcm - -4063) >> 7) :
-8159 <= pcm ? 0x00 + ((pcm - -8159) >> 8) :
0x00;
}
ulawBuf[ulawOffset++] = (byte)ulaw;
}
}
/**
* Compute the maximum of the absolute value of the pcm samples.
* The return value can be used to set ulaw encoder scaling.
* @param pcmBuf array containing 16 bit pcm data.
* @param offset offset of start of 16 bit pcm data.
* @param length number of pcm samples (not number of input bytes)
* @return maximum abs of pcm data values
*/
public static int maxAbsPcm(byte[] pcmBuf, int offset, int length) {
int max = 0;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[offset++]) + (pcmBuf[offset++] << 8);
if (pcm < 0) pcm = -pcm;
if (pcm > max) max = pcm;
}
return max;
}
/**
* Create an InputStream which takes 16 bit pcm data and produces ulaw data.
* @param in InputStream containing 16 bit pcm data.
* @param max pcm value corresponding to maximum ulaw value.
*/
public UlawEncoderInputStream(InputStream in, int max) {
mIn = in;
mMax = max;
}
@Override
public int read(byte[] buf, int offset, int length) throws IOException {
if (mIn == null) throw new IllegalStateException("not open");
// return at least one byte, but try to fill 'length'
while (mBufCount < 2) {
int n = mIn.read(mBuf, mBufCount, Math.min(length * 2, mBuf.length - mBufCount));
if (n == -1) return -1;
mBufCount += n;
}
// compand data
int n = Math.min(mBufCount / 2, length);
encode(mBuf, 0, buf, offset, n, mMax);
// move data to bottom of mBuf
mBufCount -= n * 2;
for (int i = 0; i < mBufCount; i++) mBuf[i] = mBuf[i + n * 2];
return n;
}
/*public byte[] getUpdatedBuffer(){
return mBuf;
}*/
@Override
public int read(byte[] buf) throws IOException {
return read(buf, 0, buf.length);
}
@Override
public int read() throws IOException {
int n = read(mOneByte, 0, 1);
if (n == -1) return -1;
return 0xff & (int)mOneByte[0];
}
@Override
public void close() throws IOException {
if (mIn != null) {
InputStream in = mIn;
mIn = null;
in.close();
}
}
@Override
public int available() throws IOException {
return (mIn.available() + mBufCount) / 2;
}
}
After doing all of this, as I said, the voice is screeching and noisy, and I'm not sure where I'm going wrong.
Any help is appreciated! Thank you in advance.
I am trying to receive streaming audio in my app.
Below is my code for receiving the audio stream:
public class ClientListen implements Runnable {
private Context context;
public ClientListen(Context context) {
this.context = context;
}
@Override
public void run() {
boolean run = true;
try {
DatagramSocket udpSocket = new DatagramSocket(8765);
InetAddress serverAddr = null;
try {
serverAddr = InetAddress.getByName("127.0.0.1");
} catch (UnknownHostException e) {
e.printStackTrace();
}
while (run) {
try {
byte[] message = new byte[8000];
DatagramPacket packet = new DatagramPacket(message,message.length);
Log.i("UDP client: ", "about to wait to receive");
udpSocket.setSoTimeout(10000);
udpSocket.receive(packet);
String text = new String(packet.getData(), 0, packet.getLength());
Log.d("Received text", text);
} catch (IOException e) {
Log.e(" UDP clien", "error: ", e);
run = false;
udpSocket.close();
}
}
} catch (SocketException e) {
Log.e("Socket Open:", "Error:", e);
} catch (IOException e) {
e.printStackTrace();
}
}
}
In the "Received text" logger I can see the data coming in as
D/Received text: �������n�����������q�9�$�0�/�G�{�������s�����JiH&������d�����Z���������d�����E������C�+
��l��y�����������v���9����������u��f�j�������$�����K���������F��~R�2�����T��������������L�����!��G��8������s�;�"�,�R�����(��{�����*_��Z�������5������������\������x���j~������������/��=�����%�������
How can I store this data in a wav file?
What you see is the string representation of a single UDP packet, printed right after receive() returned.
It is a very small fraction of the sound you want to convert to wave.
The while loop will soon continue and you will receive another packet, and many more.
You need to collect all the packets in a buffer and then, when you think it is complete, convert them to a wave file.
Remember that a wave file is not just the sound bytes you get from UDP: it also needs a 44-byte header prefix in order to be recognized by players.
Also, if the UDP payload is in another encoding format such as G711, you must decode those bytes to PCM; if not, you will hear heavy noise in the sound of the wave or the stream you play.
The buffer size must be accurate: if it is too big (many empty bytes at the end of the array) you will hear a helicopter-like sound. If you know exactly what the size of each packet is, you can just write it to AudioTrack in order to play the stream, or accumulate the packets and convert them to a wave file when you see fit. But if you are not sure about the size, you can use this answer to get a buffer and then write the buffer to AudioTrack:
Android AudioRecord to Server over UDP Playback Issues.
They use javax because it is a very old answer, but you just need to use AudioTrack instead in order to stream. That is out of scope here, so I will just present the AudioTrack streaming replacement for the javax SourceDataLine:
final int SAMPLE_RATE = 8000; // Hertz
final int STREAM_TYPE = AudioManager.STREAM_NOTIFICATION;
int channelConfig = AudioFormat.CHANNEL_OUT_MONO;
int encodingFormat = AudioFormat.ENCODING_PCM_16BIT;
AudioTrack track = new AudioTrack(STREAM_TYPE, SAMPLE_RATE, channelConfig,
encodingFormat, BUF_SIZE, AudioTrack.MODE_STREAM);
track.play();
//.. then after receive UDP packets and the buffer is full:
if(track != null && packet != null){
track.write(audioStreamBuffer, 0, audioStreamBuffer.length);
}
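Note that BUF_SIZE is not defined in the snippet above; a reasonable choice (an assumption on my part, not from the original answer) is the platform minimum for the same output format:
int BUF_SIZE = AudioTrack.getMinBufferSize(SAMPLE_RATE, channelConfig, encodingFormat);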
You must not do this in the UI thread (I assume you know that).
In the code I will show you, I am getting UDP packets of audio logs from a PTT radio. They are encoded in G711 u-law. Each packet is exactly 172 bytes: the first 12 bytes are the RTP header, which I need to offset (remove) in order to eliminate small noises; the remaining 160 bytes are 20 ms of sound.
I must decode the G711 u-law bytes into a PCM short array, then take the short array and make a wave file out of it. I do this after I see that no packet has arrived for more than one second (so I know the speech ended, and the next packet to arrive belongs to a new speech, so I can take the old speech and make a wave file out of it). You can decide on a different buffering policy depending on what you are doing.
It works fine. After the decoding, the sound of the wave is very good. If your UDP already carries PCM you don't need to decode G711; just skip that part.
Finally, I want to mention that I saw many old answers with code using javax.sound.sampled, which seems great because it can easily convert an audio file or stream to wave format with AudioFileFormat, and also convert G711 to PCM with AudioFormat manipulations. Unfortunately, it is not part of current Java for Android. We must rely on Android's AudioTrack instead (and AudioRecord if we want to get the sound from the mic), but AudioTrack plays only PCM and does not support the G711 format, so when streaming G711 with AudioTrack the noise is terrible. We must decode it in our own code before writing it to the track. Also, we cannot convert to a wave file using AudioInputStream; I tried to do this easily with the javax.sound.sampled jar file I added to my app, but Android kept giving me errors such as "format not supported" for wave, and mixer errors when trying to stream. So I understood that recent Android cannot work with javax.sound.sampled, and I went looking for low-level decoding of G711 and low-level creation of a wave file out of the byte-array buffer received from the UDP packets.
A. in manifest add:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.READ_EXTERNAL_STORAGE"/>
<uses-permission android:name="android.permission.INTERNET"/>
B. in the worker thread:
@Override
public void run(){
Log.i(TAG, "ClientListen thread started. Thread id: " + Thread.currentThread().getId());
try{
udpSocket = new DatagramSocket(port);
}catch(SocketException e){
e.printStackTrace();
}
byte[] messageBuf = new byte[BUF_SIZE];
Log.i(TAG, "waiting to receive packet in port: " + port);
if(udpSocket != null){
// here you can create a new AudioTrack and call track.play()
byte pttSession[] = null;
while (running){
packet = new DatagramPacket(messageBuf, 0, messageBuf.length);
Log.d(TAG, "inside while running loop");
try{
Log.d(TAG, "receive block: waiting for user to press on
speaker(listening now inside udpSocket for DatagramPacket..)");
//get inside receive block until packet will arrive through this socket
long timeBeforeBlock = System.currentTimeMillis();
udpSocket.receive(packet);
Log.d(TAG, "client received a packet, receive block stopped)");
//this sends a msg to the UI thread via the handler (you may skip this)
sendState("getting UDP packets...");
/* if the previous blocking receive returned more than one second ago, this
packet belongs to a new speech, so let's copy the previous speech
to a wave file and empty the buffer */
if(System.currentTimeMillis() - timeBeforeBlock > 1000 && pttSession != null){
convertBytesToFile(pttSession);
pttSession = null;
}
/* let's take the packet that was received and start a new speech, or add it to the ongoing speech. */
byte[] slice = Arrays.copyOfRange(packet.getData(), 12, packet.getLength());
if(null == pttSession){
pttSession = slice;
}else{
pttSession = concat(pttSession, slice);
Log.d(TAG, "pttSession:" + Arrays.toString(pttSession));
}
}catch(IOException e){
Log.e(TAG, "UDP client IOException - error: ", e);
running = false;
}
}
// let’s take the latest speech and make a last wave file out of it.
if(pttSession != null){
convertBytesToFile(pttSession);
pttSession = null;
}
// if running == false then stop listen.
udpSocket.close();
handler.sendEmptyMessage(MainActivity.UdpClientHandler.UPDATE_END);
}else{
sendState("cannot bind datagram socket to the specified port:" + port);
}
}
private void convertBytesToFile(byte[] byteArray){
//decode the bytes from G711U to PCM (outcome is a short array)
G711UCodec decoder = new G711UCodec();
int size = byteArray.length;
short[] shortArray = new short[size];
decoder.decode(shortArray, byteArray, size, 0);
String newFileName = "speech_" + System.currentTimeMillis() + ".wav";
//convert short array to wav (prepend the 44-byte header) and save it as a .wav file
Wave wave = new Wave(SAMPLE_RATE, (short) 1, shortArray, 0, shortArray.length - 1);
if(wave.writeToFile(Environment.getExternalStoragePublicDirectory
(Environment.DIRECTORY_DOWNLOADS),newFileName)){
Log.d(TAG, "wave.writeToFile successful!");
sendState("create file: "+ newFileName);
}else{
Log.w(TAG, "wave.writeToFile failed");
}
}
C. encoding/decoding G711 U-Law class:
taken from: https://github.com/thinktube-kobe/airtube/blob/master/JavaLibrary/src/com/thinktube/audio/G711UCodec.java
/**
* G.711 codec. This class provides u-law conversion.
*/
public class G711UCodec {
// s00000001wxyz...s000wxyz
// s0000001wxyza...s001wxyz
// s000001wxyzab...s010wxyz
// s00001wxyzabc...s011wxyz
// s0001wxyzabcd...s100wxyz
// s001wxyzabcde...s101wxyz
// s01wxyzabcdef...s110wxyz
// s1wxyzabcdefg...s111wxyz
private static byte[] table13to8 = new byte[8192];
private static short[] table8to16 = new short[256];
static {
// b13 --> b8
for (int p = 1, q = 0; p <= 0x80; p <<= 1, q += 0x10) {
for (int i = 0, j = (p << 4) - 0x10; i < 16; i++, j += p) {
int v = (i + q) ^ 0x7F;
byte value1 = (byte) v;
byte value2 = (byte) (v + 128);
for (int m = j, e = j + p; m < e; m++) {
table13to8[m] = value1;
table13to8[8191 - m] = value2;
}
}
}
// b8 --> b16
for (int q = 0; q <= 7; q++) {
for (int i = 0, m = (q << 4); i < 16; i++, m++) {
int v = (((i + 0x10) << q) - 0x10) << 3;
table8to16[m ^ 0x7F] = (short) v;
table8to16[(m ^ 0x7F) + 128] = (short) (65536 - v);
}
}
}
public int decode(short[] b16, byte[] b8, int count, int offset) {
for (int i = 0, j = offset; i < count; i++, j++) {
b16[i] = table8to16[b8[j] & 0xFF];
}
return count;
}
public int encode(short[] b16, int count, byte[] b8, int offset) {
for (int i = 0, j = offset; i < count; i++, j++) {
b8[j] = table13to8[(b16[i] >> 4) & 0x1FFF];
}
return count;
}
public int getSampleCount(int frameSize) {
return frameSize;
}
}
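For example, a minimal decode sketch (sizes assumed from the 172-byte RTP packets described above, i.e. one 160-byte u-law payload per packet):
byte[] ulawPayload = new byte[160];            // one 20 ms G.711 u-law frame, RTP header already stripped
short[] pcm = new short[ulawPayload.length];   // decode() emits one 16-bit sample per input byte
new G711UCodec().decode(pcm, ulawPayload, ulawPayload.length, 0);
// pcm now holds linear 16-bit samples, ready for AudioTrack.write(pcm, 0, pcm.length)
// or for wrapping with the Wave class shown below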
D. Converting to wave file:
Taken from here:
https://github.com/google/oboe/issues/320
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
public class Wave
{
private final int LONGINT = 4;
private final int SMALLINT = 2;
private final int INTEGER = 4;
private final int ID_STRING_SIZE = 4;
private final int WAV_RIFF_SIZE = LONGINT+ID_STRING_SIZE;
private final int WAV_FMT_SIZE = (4*SMALLINT)+(INTEGER*2)+LONGINT+ID_STRING_SIZE;
private final int WAV_DATA_SIZE = ID_STRING_SIZE+LONGINT;
private final int WAV_HDR_SIZE = WAV_RIFF_SIZE+ID_STRING_SIZE+WAV_FMT_SIZE+WAV_DATA_SIZE;
private final short PCM = 1;
private final int SAMPLE_SIZE = 2;
int cursor, nSamples;
byte[] output;
public Wave(int sampleRate, short nChannels, short[] data, int start, int end)
{
nSamples=end-start+1;
cursor=0;
output=new byte[nSamples*SMALLINT+WAV_HDR_SIZE];
buildHeader(sampleRate,nChannels);
writeData(data,start,end);
}
/*
by Udi for using byteArray directly
*/
public Wave(int sampleRate, short nChannels, byte[] data, int start, int end)
{
int size = data.length;
short[] shortArray = new short[size];
for (int index = 0; index < size; index++){
shortArray[index] = (short) data[index];
}
nSamples=end-start+1;
cursor=0;
output=new byte[nSamples*SMALLINT+WAV_HDR_SIZE];
buildHeader(sampleRate,nChannels);
writeData(shortArray,start,end);
}
// ------------------------------------------------------------
private void buildHeader(int sampleRate, short nChannels)
{
write("RIFF");
write(output.length);
write("WAVE");
writeFormat(sampleRate, nChannels);
}
// ------------------------------------------------------------
public void writeFormat(int sampleRate, short nChannels)
{
write("fmt ");
write(WAV_FMT_SIZE-WAV_DATA_SIZE);
write(PCM);
write(nChannels);
write(sampleRate);
write(nChannels * sampleRate * SAMPLE_SIZE);
write((short)(nChannels * SAMPLE_SIZE));
write((short)16);
}
// ------------------------------------------------------------
public void writeData(short[] data, int start, int end)
{
write("data");
write(nSamples*SMALLINT);
for(int i=start; i<=end; write(data[i++]));
}
// ------------------------------------------------------------
private void write(byte b)
{
output[cursor++]=b;
}
// ------------------------------------------------------------
private void write(String id)
{
if(id.length()!=ID_STRING_SIZE){
}
else {
for(int i=0; i<ID_STRING_SIZE; ++i) write((byte)id.charAt(i));
}
}
// ------------------------------------------------------------
private void write(int i)
{
write((byte) (i&0xFF)); i>>=8;
write((byte) (i&0xFF)); i>>=8;
write((byte) (i&0xFF)); i>>=8;
write((byte) (i&0xFF));
}
// ------------------------------------------------------------
private void write(short i)
{
write((byte) (i&0xFF)); i>>=8;
write((byte) (i&0xFF));
}
// ------------------------------------------------------------
public boolean writeToFile(File fileParent , String filename)
{
boolean ok=false;
try {
File path=new File(fileParent, filename);
FileOutputStream outFile = new FileOutputStream(path);
outFile.write(output);
outFile.close();
ok=true;
} catch (FileNotFoundException e) {
e.printStackTrace();
ok=false;
} catch (IOException e) {
ok=false;
e.printStackTrace();
}
return ok;
}
/**
* by Udi, for testing: write the file with a temp name, so if you write many packets each packet will be written to a new file instead of deleting
* the previous file. (this is mainly for debug)
* @param fileParent
* @param filename
* @return
*/
public boolean writeToTmpFile(File fileParent , String filename)
{
boolean ok=false;
try {
File outputFile = File.createTempFile(filename, ".wav",fileParent);
FileOutputStream fileoutputstream = new FileOutputStream(outputFile);
fileoutputstream.write(output);
fileoutputstream.close();
ok=true;
} catch (FileNotFoundException e) {
e.printStackTrace();
ok=false;
} catch (IOException e) {
ok=false;
e.printStackTrace();
}
return ok;
}
}
I have read a lot of pages about Android's AudioRecorder. You can see a list of them below the question.
I'm trying to record audio with AudioRecorder, but it's not working well.
public class MainActivity extends Activity {
AudioRecord ar = null;
int buffsize = 0;
int blockSize = 256;
boolean isRecording = false;
private Thread recordingThread = null;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
}
public void baslat(View v)
{
// when click to START
buffsize = AudioRecord.getMinBufferSize(44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
ar = new AudioRecord(MediaRecorder.AudioSource.MIC, 44100, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, buffsize);
ar.startRecording();
isRecording = true;
recordingThread = new Thread(new Runnable() {
public void run() {
writeAudioDataToFile();
}
}, "AudioRecorder Thread");
recordingThread.start();
}
public void durdur(View v)
{
// When click to STOP
ar.stop();
isRecording = false;
}
private void writeAudioDataToFile() {
// Write the output audio in byte
String filePath = "/sdcard/voice8K16bitmono.wav";
short sData[] = new short[buffsize/2];
FileOutputStream os = null;
try {
os = new FileOutputStream(filePath);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
while (isRecording) {
// gets the voice output from microphone to byte format
ar.read(sData, 0, buffsize/2);
Log.d("eray","Short wirting to file" + sData.toString());
try {
// // writes the data to file from buffer
// // stores the voice buffer
byte bData[] = short2byte(sData);
os.write(bData, 0, buffsize);
} catch (IOException e) {
e.printStackTrace();
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
private byte[] short2byte(short[] sData) {
int shortArrsize = sData.length;
byte[] bytes = new byte[shortArrsize * 2];
for (int i = 0; i < shortArrsize; i++) {
bytes[i * 2] = (byte) (sData[i] & 0x00FF);
bytes[(i * 2) + 1] = (byte) (sData[i] >> 8);
sData[i] = 0;
}
return bytes;
}
It's creating a .wav file, but when I try to listen to it, it won't open. I'm getting a "file not supported" error. I've tried to play the file with quite a few media player applications.
NOTE: I have to use AudioRecord instead of MediaRecorder because my app will be doing another process while recording (displaying an equalizer).
Here is the list of pages that I've read about this subject:
http://developer.android.com/reference/android/media/AudioRecord.html#read(short[],%20int,%20int)
Android AudioRecord example
http://audiorecordandroid.blogspot.in
AudioRecord object not initializing
Recording a wav file from the mic in Android - problems
http://i-liger.com/article/android-wav-audio-recording
Creating a WAV file from raw PCM data using the Android SDK
Capturing Sound for Analysis and Visualizing Frequencies in Android
There are a lot of different ways to go about this. I've tried lots of them but nothing works for me. I've been working on this problem for about 6 hours now so I would appreciate a definitive answer, ideally some sample code.
I wrote a simple (by which you should read, not to professional standards) class to do this yesterday, and it works.
private class Wave {
private final int LONGINT = 4;
private final int SMALLINT = 2;
private final int INTEGER = 4;
private final int ID_STRING_SIZE = 4;
private final int WAV_RIFF_SIZE = LONGINT + ID_STRING_SIZE;
private final int WAV_FMT_SIZE = (4 * SMALLINT) + (INTEGER * 2) + LONGINT + ID_STRING_SIZE;
private final int WAV_DATA_SIZE = ID_STRING_SIZE + LONGINT;
private final int WAV_HDR_SIZE = WAV_RIFF_SIZE + ID_STRING_SIZE + WAV_FMT_SIZE + WAV_DATA_SIZE;
private final short PCM = 1;
private final int SAMPLE_SIZE = 2;
int cursor, nSamples;
byte[] output;
public Wave(int sampleRate, short nChannels, short[] data, int start, int end) {
nSamples = end - start + 1;
cursor = 0;
output = new byte[nSamples * SMALLINT + WAV_HDR_SIZE];
buildHeader(sampleRate, nChannels);
writeData(data, start, end);
}
// ------------------------------------------------------------
private void buildHeader(int sampleRate, short nChannels) {
write("RIFF");
write(output.length);
write("WAVE");
writeFormat(sampleRate, nChannels);
}
// ------------------------------------------------------------
public void writeFormat(int sampleRate, short nChannels) {
write("fmt ");
write(WAV_FMT_SIZE - WAV_DATA_SIZE);
write(PCM);
write(nChannels);
write(sampleRate);
write(nChannels * sampleRate * SAMPLE_SIZE);
write((short) (nChannels * SAMPLE_SIZE));
write((short) 16);
}
// ------------------------------------------------------------
public void writeData(short[] data, int start, int end) {
write("data");
write(nSamples * SMALLINT);
for (int i = start; i <= end; write(data[i++])) ;
}
// ------------------------------------------------------------
private void write(byte b) {
output[cursor++] = b;
}
// ------------------------------------------------------------
private void write(String id) {
if (id.length() != ID_STRING_SIZE)
Utils.logError("String " + id + " must have four characters.");
else {
for (int i = 0; i < ID_STRING_SIZE; ++i) write((byte) id.charAt(i));
}
}
// ------------------------------------------------------------
private void write(int i) {
write((byte) (i & 0xFF));
i >>= 8;
write((byte) (i & 0xFF));
i >>= 8;
write((byte) (i & 0xFF));
i >>= 8;
write((byte) (i & 0xFF));
}
// ------------------------------------------------------------
private void write(short i) {
write((byte) (i & 0xFF));
i >>= 8;
write((byte) (i & 0xFF));
}
// ------------------------------------------------------------
public boolean wroteToFile(String filename) {
boolean ok = false;
try {
File path = new File(getFilesDir(), filename);
FileOutputStream outFile = new FileOutputStream(path);
outFile.write(output);
outFile.close();
ok = true;
} catch (FileNotFoundException e) {
e.printStackTrace();
ok = false;
} catch (IOException e) {
ok = false;
e.printStackTrace();
}
return ok;
}
}
Hope this helps
PCMAudioHelper solved my problem. I'll modify this answer and explain it, but first I have to run some tests on this class.
You might find this OMRECORDER helpful for recording in .WAV format.
In case .aac works for you, check out this WhatsappAudioRecorder:
On startRecording button click:
Initialise a new thread.
Create a file with the .aac extension.
Create an output stream for the file.
Set output
Set the listener and execute the thread.
On stop click:
Interrupt the thread and the audio will be saved in the file.
Here is the full gist for reference (a short usage sketch follows it):
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.os.Build;
import android.util.Log;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
public class AudioRecordThread implements Runnable {
private static final String TAG = AudioRecordThread.class.getSimpleName();
private static final int SAMPLE_RATE = 44100;
private static final int SAMPLE_RATE_INDEX = 4;
private static final int CHANNELS = 1;
private static final int BIT_RATE = 32000;
private final int bufferSize;
private final MediaCodec mediaCodec;
private final AudioRecord audioRecord;
private final OutputStream outputStream;
private OnRecorderFailedListener onRecorderFailedListener;
AudioRecordThread(OutputStream outputStream, OnRecorderFailedListener onRecorderFailedListener) throws IOException {
this.bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
this.audioRecord = createAudioRecord(this.bufferSize);
this.mediaCodec = createMediaCodec(this.bufferSize);
this.outputStream = outputStream;
this.onRecorderFailedListener = onRecorderFailedListener;
this.mediaCodec.start();
try {
audioRecord.startRecording();
} catch (Exception e) {
Log.w(TAG, e);
mediaCodec.release();
throw new IOException(e);
}
}
@Override
public void run() {
if (onRecorderFailedListener != null) {
Log.d(TAG, "onRecorderStarted");
onRecorderFailedListener.onRecorderStarted();
}
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
ByteBuffer[] codecInputBuffers = mediaCodec.getInputBuffers();
ByteBuffer[] codecOutputBuffers = mediaCodec.getOutputBuffers();
try {
while (!Thread.interrupted()) {
boolean success = handleCodecInput(audioRecord, mediaCodec, codecInputBuffers, Thread.currentThread().isAlive());
if (success)
handleCodecOutput(mediaCodec, codecOutputBuffers, bufferInfo, outputStream);
}
} catch (IOException e) {
Log.w(TAG, e);
} finally {
mediaCodec.stop();
audioRecord.stop();
mediaCodec.release();
audioRecord.release();
try {
outputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
private boolean handleCodecInput(AudioRecord audioRecord,
MediaCodec mediaCodec, ByteBuffer[] codecInputBuffers,
boolean running) throws IOException {
byte[] audioRecordData = new byte[bufferSize];
int length = audioRecord.read(audioRecordData, 0, audioRecordData.length);
if (length == AudioRecord.ERROR_BAD_VALUE ||
length == AudioRecord.ERROR_INVALID_OPERATION ||
length != bufferSize) {
if (length != bufferSize) {
if (onRecorderFailedListener != null) {
Log.d(TAG, "length != BufferSize calling onRecordFailed");
onRecorderFailedListener.onRecorderFailed();
}
return false;
}
}
int codecInputBufferIndex = mediaCodec.dequeueInputBuffer(10 * 1000);
if (codecInputBufferIndex >= 0) {
ByteBuffer codecBuffer = codecInputBuffers[codecInputBufferIndex];
codecBuffer.clear();
codecBuffer.put(audioRecordData);
mediaCodec.queueInputBuffer(codecInputBufferIndex, 0, length, 0, running ? 0 : MediaCodec.BUFFER_FLAG_END_OF_STREAM);
}
return true;
}
private void handleCodecOutput(MediaCodec mediaCodec,
ByteBuffer[] codecOutputBuffers,
MediaCodec.BufferInfo bufferInfo,
OutputStream outputStream)
throws IOException {
int codecOutputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
while (codecOutputBufferIndex != MediaCodec.INFO_TRY_AGAIN_LATER) {
if (codecOutputBufferIndex >= 0) {
ByteBuffer encoderOutputBuffer = codecOutputBuffers[codecOutputBufferIndex];
encoderOutputBuffer.position(bufferInfo.offset);
encoderOutputBuffer.limit(bufferInfo.offset + bufferInfo.size);
if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != MediaCodec.BUFFER_FLAG_CODEC_CONFIG) {
byte[] header = createAdtsHeader(bufferInfo.size - bufferInfo.offset);
outputStream.write(header);
byte[] data = new byte[encoderOutputBuffer.remaining()];
encoderOutputBuffer.get(data);
outputStream.write(data);
}
encoderOutputBuffer.clear();
mediaCodec.releaseOutputBuffer(codecOutputBufferIndex, false);
} else if (codecOutputBufferIndex == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
codecOutputBuffers = mediaCodec.getOutputBuffers();
}
codecOutputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
}
}
private byte[] createAdtsHeader(int length) {
int frameLength = length + 7;
byte[] adtsHeader = new byte[7];
adtsHeader[0] = (byte) 0xFF; // Sync Word
adtsHeader[1] = (byte) 0xF1; // MPEG-4, Layer (0), No CRC
adtsHeader[2] = (byte) ((MediaCodecInfo.CodecProfileLevel.AACObjectLC - 1) << 6);
adtsHeader[2] |= (((byte) SAMPLE_RATE_INDEX) << 2);
adtsHeader[2] |= (((byte) CHANNELS) >> 2);
adtsHeader[3] = (byte) (((CHANNELS & 3) << 6) | ((frameLength >> 11) & 0x03));
adtsHeader[4] = (byte) ((frameLength >> 3) & 0xFF);
adtsHeader[5] = (byte) (((frameLength & 0x07) << 5) | 0x1f);
adtsHeader[6] = (byte) 0xFC;
return adtsHeader;
}
private AudioRecord createAudioRecord(int bufferSize) {
AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO,
AudioFormat.ENCODING_PCM_16BIT, bufferSize * 10);
if (audioRecord.getState() != AudioRecord.STATE_INITIALIZED) {
Log.d(TAG, "Unable to initialize AudioRecord");
throw new RuntimeException("Unable to initialize AudioRecord");
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
if (android.media.audiofx.NoiseSuppressor.isAvailable()) {
android.media.audiofx.NoiseSuppressor noiseSuppressor = android.media.audiofx.NoiseSuppressor
.create(audioRecord.getAudioSessionId());
if (noiseSuppressor != null) {
noiseSuppressor.setEnabled(true);
}
}
}
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
if (android.media.audiofx.AutomaticGainControl.isAvailable()) {
android.media.audiofx.AutomaticGainControl automaticGainControl = android.media.audiofx.AutomaticGainControl
.create(audioRecord.getAudioSessionId());
if (automaticGainControl != null) {
automaticGainControl.setEnabled(true);
}
}
}
return audioRecord;
}
private MediaCodec createMediaCodec(int bufferSize) throws IOException {
MediaCodec mediaCodec = MediaCodec.createEncoderByType("audio/mp4a-latm");
MediaFormat mediaFormat = new MediaFormat();
mediaFormat.setString(MediaFormat.KEY_MIME, "audio/mp4a-latm");
mediaFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, SAMPLE_RATE);
mediaFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, CHANNELS);
mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, bufferSize);
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
try {
mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
} catch (Exception e) {
Log.w(TAG, e);
mediaCodec.release();
throw new IOException(e);
}
return mediaCodec;
}
interface OnRecorderFailedListener {
void onRecorderFailed();
void onRecorderStarted();
}
}
I would add this as a comment but I don't yet have enough Stack Overflow rep points...
Opiatefuchs's link takes you to sample code that shows you the exact header formatting necessary to create a .wav file. I've been all over that code myself. Very helpful.
First, you need to know that a WAV file has a defined format: it starts with a header, so you can't just write the raw data straight to a .wav file.
Second, the WAV header includes the length of the file, so you need to write the header after recording.
My solution is to use AudioRecord to record to a PCM file:
byte[] audiodata = new byte[bufferSizeInBytes];
FileOutputStream fos = null;
int readsize = 0;
try {
fos = new FileOutputStream(pcmFileName, true);
} catch (FileNotFoundException e) {
Log.e("AudioRecorder", e.getMessage());
}
status = Status.STATUS_START;
while (status == Status.STATUS_START && audioRecord != null) {
readsize = audioRecord.read(audiodata, 0, bufferSizeInBytes);
if (AudioRecord.ERROR_INVALID_OPERATION != readsize && fos != null) {
try {
if (readsize > 0 && readsize <= audiodata.length)
fos.write(audiodata, 0, readsize);
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
}
}
try {
if (fos != null) {
fos.close();
}
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
Then convert it to a WAV file:
byte buffer[] = null;
int TOTAL_SIZE = 0;
File file = new File(pcmPath);
if (!file.exists()) {
return false;
}
TOTAL_SIZE = (int) file.length();
WaveHeader header = new WaveHeader();
header.fileLength = TOTAL_SIZE + (44 - 8);
header.FmtHdrLeth = 16;
header.BitsPerSample = 16;
header.Channels = 1;
header.FormatTag = 0x0001;
header.SamplesPerSec = 8000;
header.BlockAlign = (short) (header.Channels * header.BitsPerSample / 8);
header.AvgBytesPerSec = header.BlockAlign * header.SamplesPerSec;
header.DataHdrLeth = TOTAL_SIZE;
byte[] h = null;
try {
h = header.getHeader();
} catch (IOException e1) {
Log.e("PcmToWav", e1.getMessage());
return false;
}
if (h.length != 44)
return false;
File destfile = new File(destinationPath);
if (destfile.exists())
destfile.delete();
try {
buffer = new byte[1024 * 4]; // 4 KB copy buffer
InputStream inStream = null;
OutputStream ouStream = null;
ouStream = new BufferedOutputStream(new FileOutputStream(
destinationPath));
ouStream.write(h, 0, h.length);
inStream = new BufferedInputStream(new FileInputStream(file));
int size = inStream.read(buffer);
while (size != -1) {
ouStream.write(buffer, 0, size); // write only the bytes actually read
size = inStream.read(buffer);
}
inStream.close();
ouStream.close();
} catch (FileNotFoundException e) {
Log.e("PcmToWav", e.getMessage());
return false;
} catch (IOException ioe) {
Log.e("PcmToWav", ioe.getMessage());
return false;
}
if (deletePcmFile) {
file.delete();
}
Log.i("PcmToWav", "makePCMFileToWAVFile success!" + new SimpleDateFormat("yyyy-MM-dd hh:mm").format(new Date()));
return true;
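One note on the snippet above: WaveHeader is not an Android framework class, just a small container for the canonical 44-byte header. A minimal sketch of what it might look like, assuming the field names used above and writing every multi-byte field little-endian:
import java.io.ByteArrayOutputStream;
import java.io.IOException;

public class WaveHeader {
    public int fileLength;        // total file size minus 8 (RIFF chunk size)
    public int FmtHdrLeth;        // 16 for PCM
    public short BitsPerSample;   // e.g. 16
    public short Channels;        // e.g. 1
    public short FormatTag;       // 0x0001 = PCM
    public int SamplesPerSec;     // e.g. 8000
    public short BlockAlign;      // Channels * BitsPerSample / 8
    public int AvgBytesPerSec;    // BlockAlign * SamplesPerSec
    public int DataHdrLeth;       // size of the raw PCM payload in bytes

    public byte[] getHeader() throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream(44);
        writeString(bos, "RIFF");
        writeInt(bos, fileLength);
        writeString(bos, "WAVE");
        writeString(bos, "fmt ");
        writeInt(bos, FmtHdrLeth);
        writeShort(bos, FormatTag);
        writeShort(bos, Channels);
        writeInt(bos, SamplesPerSec);
        writeInt(bos, AvgBytesPerSec);
        writeShort(bos, BlockAlign);
        writeShort(bos, BitsPerSample);
        writeString(bos, "data");
        writeInt(bos, DataHdrLeth);
        return bos.toByteArray(); // exactly 44 bytes
    }

    private void writeInt(ByteArrayOutputStream bos, int v) { // little-endian int
        bos.write(v); bos.write(v >> 8); bos.write(v >> 16); bos.write(v >> 24);
    }
    private void writeShort(ByteArrayOutputStream bos, short v) { // little-endian short
        bos.write(v); bos.write(v >> 8);
    }
    private void writeString(ByteArrayOutputStream bos, String s) throws IOException {
        bos.write(s.getBytes("US-ASCII"));
    }
}
If getHeader() returns anything other than 44 bytes, the length check in the snippet above rejects it.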
I have been following this tutorial on using LAME MP3 on Android with JNI. Recording seems to be working and I get an MP3 as output, but on playback the audio is slowed down and pitched down.
I've tried to put all the pertinent code below. Any guidance on why this is happening? Thanks in advance for your help.
Edit: OK, so just to check, I imported the raw data into Audacity and that plays back fine, so this must be an issue at the encoding stage.
Java class:
public class Record extends Activity implements OnClickListener {
static {
System.loadLibrary("mp3lame");
}
private native void initEncoder(int numChannels, int sampleRate, int bitRate, int mode, int quality);
private native void destroyEncoder();
private native int encodeFile(String sourcePath, String targetPath);
private static final int RECORDER_BPP = 16;
private static final String AUDIO_RECORDER_FILE_EXT_WAV = ".wav";
private static final String AUDIO_RECORDER_FOLDER = "AberdeenSoundsites";
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";
private static final int[] RECORDER_SAMPLERATES = {44100, 22050, 11025, 8000};
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_STEREO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
public static final int NUM_CHANNELS = 2;
public static final int SAMPLE_RATE = 44100;
public static final int BITRATE = 320;
public static final int MODE = 1;
public static final int QUALITY = 2;
private short[] mBuffer;
private File rawFile;
private File encodedFile;
private int sampleRate;
private String filename;
private AudioRecord recorder = null;
private int bufferSize = 0;
private Thread recordingThread = null;
private boolean isRecording = false;
@Override
public void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.record);
initEncoder(NUM_CHANNELS, SAMPLE_RATE, BITRATE, MODE, QUALITY);
stopButton = (Button) findViewById(R.id.stop_button);
stopButton.setOnClickListener(this);
timer = (TextView) findViewById(R.id.recording_time);
bufferSize = AudioRecord.getMinBufferSize(44100, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING);
}
private void startRecording() {
stopped = false;
stopButton.setText(R.string.stop_button_label);
// Set up and start audio recording
recorder = findAudioRecord();
recorder.startRecording();
isRecording = true;
rawFile = getFile("raw");
mBuffer = new short[bufferSize];
startBufferedWrite(rawFile);
}
private void stopRecording() {
mHandler.removeCallbacks(startTimer);
stopped = true;
if(recorder != null){
isRecording = false;
recorder.stop();
recorder.release();
recorder = null;
recordingThread = null;
}
encodedFile = getFile("mp3");
int result = encodeFile(rawFile.getAbsolutePath(), encodedFile.getAbsolutePath());
if (result == 0) {
Toast.makeText(Record.this, "Encoded to " + encodedFile.getName(), Toast.LENGTH_SHORT)
.show();
}
}
private void startBufferedWrite(final File file) {
new Thread(new Runnable() {
@Override
public void run() {
Looper.prepare();
DataOutputStream output = null;
try {
output = new DataOutputStream(new BufferedOutputStream(new FileOutputStream(file)));
while (isRecording) {
int readSize = recorder.read(mBuffer, 0, mBuffer.length);
for (int i = 0; i < readSize; i++) {
output.writeShort(mBuffer[i]);
}
}
} catch (IOException e) {
Toast.makeText(Record.this, e.getMessage(), Toast.LENGTH_SHORT).show();
} finally {
if (output != null) {
try {
output.flush();
} catch (IOException e) {
Toast.makeText(Record.this, e.getMessage(), Toast.LENGTH_SHORT).show();
} finally {
try {
output.close();
} catch (IOException e) {
Toast.makeText(Record.this, e.getMessage(), Toast.LENGTH_SHORT).show();
}
}
}
}
}
}).start();
}
private File getFile(final String suffix) {
Time time = new Time();
time.setToNow();
return new File(Environment.getExternalStorageDirectory()+"/MyAppFolder", time.format("%Y%m%d%H%M%S") + "." + suffix);
}
public AudioRecord findAudioRecord() {
for (int rate : RECORDER_SAMPLERATES) {
for (short audioFormat : new short[] { AudioFormat.ENCODING_PCM_16BIT, AudioFormat.ENCODING_PCM_8BIT }) {
for (short channelConfig : new short[] { AudioFormat.CHANNEL_IN_STEREO, AudioFormat.CHANNEL_IN_MONO }) {
try {
Log.d("AberdeenSoundsites", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: "
+ channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
// check if we can instantiate and have a success
AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize);
sampleRate = rate;
if (recorder.getState() == AudioRecord.STATE_INITIALIZED)
return recorder;
}
} catch (Exception e) {
Log.e("MyApp", rate + "Exception, keep trying.",e);
}
}
}
}
Log.e("MyApp", "No settings worked :(");
return null;
}
C wrapper:
#include <stdio.h>
#include <stdlib.h>
#include <jni.h>
#include <android/log.h>
#include "libmp3lame/lame.h"
#define LOG_TAG "LAME ENCODER"
#define LOGD(format, args...) __android_log_print(ANDROID_LOG_DEBUG, LOG_TAG, format, ##args);
#define BUFFER_SIZE 8192
#define be_short(s) ((short) ((unsigned short) (s) << 8) | ((unsigned short) (s) >> 8))
lame_t lame;
int read_samples(FILE *input_file, short *input) {
int nb_read;
nb_read = fread(input, 1, BUFFER_SIZE * sizeof(short), input_file) / sizeof(short); /* fill the whole buffer, not a single sample */
int i = 0;
while (i < nb_read) {
input[i] = be_short(input[i]);
i++;
}
return nb_read;
}
void Java_myPacakage_myApp_Record_initEncoder(JNIEnv *env,
jobject jobj, jint in_num_channels, jint in_samplerate, jint in_brate,
jint in_mode, jint in_quality) {
lame = lame_init();
LOGD("Init parameters:");
lame_set_num_channels(lame, in_num_channels);
LOGD("Number of channels: %d", in_num_channels);
lame_set_in_samplerate(lame, in_samplerate);
LOGD("Sample rate: %d", in_samplerate);
lame_set_brate(lame, in_brate);
LOGD("Bitrate: %d", in_brate);
lame_set_mode(lame, in_mode);
LOGD("Mode: %d", in_mode);
lame_set_quality(lame, in_quality);
LOGD("Quality: %d", in_quality);
int res = lame_init_params(lame);
LOGD("Init returned: %d", res);
}
void Java_myPacakage_myApp_Record_destroyEncoder(
JNIEnv *env, jobject jobj) {
int res = lame_close(lame);
LOGD("Deinit returned: %d", res);
}
void Java_myPacakage_myApp_Record_encodeFile(JNIEnv *env,
jobject jobj, jstring in_source_path, jstring in_target_path) {
const char *source_path, *target_path;
source_path = (*env)->GetStringUTFChars(env, in_source_path, NULL);
target_path = (*env)->GetStringUTFChars(env, in_target_path, NULL);
FILE *input_file, *output_file;
input_file = fopen(source_path, "rb");
output_file = fopen(target_path, "wb");
short input[BUFFER_SIZE];
char output[BUFFER_SIZE];
int nb_read = 0;
int nb_write = 0;
int nb_total = 0;
LOGD("Encoding started");
while ((nb_read = read_samples(input_file, input)) > 0) {
nb_write = lame_encode_buffer(lame, input, input, nb_read, output,
BUFFER_SIZE);
fwrite(output, nb_write, 1, output_file);
nb_total += nb_write;
}
LOGD("Encoded %d bytes", nb_total);
nb_write = lame_encode_flush(lame, output, BUFFER_SIZE);
fwrite(output, nb_write, 1, output_file);
LOGD("Flushed %d bytes", nb_write);
fclose(input_file);
fclose(output_file);
}
Edit: OK, so out of interest I downloaded the APK the tutorial provides to my phone and ran it. That works fine, which suggests the problem is less with the tutorial and more something I've done. I will look over this again when I have some time and see if I can determine where I went wrong.
You call initEncoder with 2 channels, and initialize AudioRecord with STEREO and MONO, but wrapper.c can only deal with 1 channel:
nb_write = lame_encode_buffer(lame, input, input, nb_read, output, BUFFER_SIZE);
The above code requires that the source audio be mono (1 channel). If you want to support STEREO, pay attention to the lame_encode_buffer signature:
int CDECL lame_encode_buffer (
lame_global_flags* gfp, /* global context handle */
const short int buffer_l [], /* PCM data for left channel */
const short int buffer_r [], /* PCM data for right channel */
const int nsamples, /* number of samples per channel */
unsigned char* mp3buf, /* pointer to encoded MP3 stream */
const int mp3buf_size ); /* number of valid octets in this
stream */
To give you a pointer: you need to invoke lame_encode_buffer_interleaved() if you record with 2 channels (stereo).
It took me a few days to figure it out; this is the code you can use:
if (lame_get_num_channels(glf) == 2)
{
result = lame_encode_buffer_interleaved(glf, j_buffer_l, samples/2, j_mp3buf, mp3buf_size);
}
else
{
result = lame_encode_buffer(glf, j_buffer_l, j_buffer_r, samples, j_mp3buf, mp3buf_size);
}
You prompted me to look at my problem again, and I found the cause in my case. Maybe it is what is happening for you. Check the sample rate of the WAV file you were using. I assumed, or looked at mine too quickly, and thought it said 44100; but it was 48000! I fixed my problem with:
lame_set_in_samplerate(lame, 48000);
lame_set_out_samplerate(lame, 44100);
Perhaps your code isn't reading the correct input sample rate for some odd reason?
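If you'd rather verify than assume: in a canonical 44-byte WAV header the sample rate sits at byte offset 24, little-endian. A minimal sketch that reads it back (the helper name is mine):
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;

// Reads the SamplesPerSec field (byte offsets 24-27, little-endian) of a WAV header.
static int readWavSampleRate(File wavFile) throws IOException {
    FileInputStream in = new FileInputStream(wavFile);
    try {
        byte[] header = new byte[28];
        if (in.read(header) != header.length)
            throw new IOException("File too short for a WAV header");
        return (header[24] & 0xFF)
             | ((header[25] & 0xFF) << 8)
             | ((header[26] & 0xFF) << 16)
             | ((header[27] & 0xFF) << 24);
    } finally {
        in.close();
    }
}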
You can rewrite
nb_write = lame_encode_buffer(lame, input, input, nb_read, output, BUFFER_SIZE);
to
nb_write = lame_encode_buffer(lame, input1, input2, nb_read, output, BUFFER_SIZE);
and use two mono raw files as the input. Of course you will have to adapt your encodeFile function so that it takes two source paths and handles everything twice.
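If you'd rather keep a single interleaved stereo raw file instead, another option is to split it on the Java side before crossing JNI, so the native code still receives two separate mono buffers; a minimal sketch (the helper name is hypothetical):
// Splits interleaved stereo samples (L,R,L,R,...) into two mono arrays.
static short[][] deinterleave(short[] interleaved) {
    int frames = interleaved.length / 2;
    short[] left = new short[frames];
    short[] right = new short[frames];
    for (int i = 0; i < frames; i++) {
        left[i] = interleaved[2 * i];
        right[i] = interleaved[2 * i + 1];
    }
    return new short[][] { left, right };
}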
I have written code for recording an audio file using AudioRecord, and while writing the file to the SD card I make two versions.
Version 1
The recorded file is saved to the SD card as-is.
Version 2
I apply a gain to the recorded file and save that version to the SD card.
This works great on Sony Ericsson phones, and the audio volume is boosted to a great extent.
But I am struggling to make it work on Samsung devices: when I play the recorded file it sounds like Talking Tom :P
Initially I thought the Samsung devices did not like the parameter combination I had used to create the AudioRecord.
So I used the following approach, in which I loop over the available configurations and use the best one to initialize AudioRecord:
public AudioRecord findAudioRecord() {
for (int rate: mSampleRates) {
for (short audioFormat: new short[] {
AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT
}) {
for (short channelConfig: new short[] {
AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO
}) {
try {
Log.i("vipul", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
// check if we can instantiate and have a success
AudioRecord recorder = new AudioRecord(
AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);
if (recorder.getState() == AudioRecord.STATE_INITIALIZED) return recorder;
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
return null;
}
Below is the code that works well on Sony phones but struggles on Samsung devices.
public class EnvironmentRecorder extends Activity implements OnClickListener {
private static final int RECORDER_BPP = 16;
private static final String AUDIO_RECORDER_FILE_EXT_WAV = ".wav";
private static final String AUDIO_RECORDER_FOLDER = "MyRecorder";
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";
private static final int RECORDER_SAMPLERATE = 44100;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_STEREO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private Button start, stop;
private AudioRecord recorder = null;
private int bufferSize = 0;
private Thread recordingThread = null;
private boolean isRecording = false;
private static int[] mSampleRates = new int[] {
8000, 11025, 22050, 44100
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
start = (Button) findViewById(R.id.start);
stop = (Button) findViewById(R.id.stop);
start.setOnClickListener(this);
stop.setOnClickListener(this);
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.start:
startRecord();
break;
case R.id.stop:
stopRecording();
break;
}
}
public EnvironmentRecorder() {
try {
bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING);
} catch (Exception e) {
e.printStackTrace();
}
}
private String getFilename1() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
return (file.getAbsolutePath() + "/" + "NotGained" + AUDIO_RECORDER_FILE_EXT_WAV);
}
private String getFilename2() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
return (file.getAbsolutePath() + "/" + "Gained" + AUDIO_RECORDER_FILE_EXT_WAV);
}
private String getTempFilename() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
File tempFile = new File(filepath, AUDIO_RECORDER_TEMP_FILE);
if (tempFile.exists()) tempFile.delete();
return (file.getAbsolutePath() + "/" + AUDIO_RECORDER_TEMP_FILE);
}
public AudioRecord findAudioRecord() {
for (int rate: mSampleRates) {
for (short audioFormat: new short[] {
AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT
}) {
for (short channelConfig: new short[] {
AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO
}) {
try {
Log.v("vipul", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
// check if we can instantiate and have a success
AudioRecord recorder = new AudioRecord(
AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);
if (recorder.getState() == AudioRecord.STATE_INITIALIZED) return recorder;
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
return null;
}
public void startRecord() {
/*
* recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
* RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING,
* bufferSize);
*/
recorder = findAudioRecord();
recorder.startRecording();
isRecording = true;
recordingThread = new Thread(new Runnable() {
@Override
public void run() {
writeAudioDataToFile();
}
}, "AudioRecorder Thread");
recordingThread.start();
}
private void writeAudioDataToFile() {
byte data[] = new byte[bufferSize];
String filename = getTempFilename();
FileOutputStream os = null;
try {
os = new FileOutputStream(filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
int read = 0;
if (null != os) {
while (isRecording) {
read = recorder.read(data, 0, bufferSize);
if (AudioRecord.ERROR_INVALID_OPERATION != read) {
try {
os.write(data, 0, read); // write only the bytes actually read
} catch (IOException e) {
e.printStackTrace();
}
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
public void stopRecording() {
if (null != recorder) {
isRecording = false;
recorder.stop();
recorder.release();
recorder = null;
recordingThread = null;
copyWaveFile(getTempFilename(), getFilename1(), getFilename2());
deleteTempFile();
}
}
private void deleteTempFile() {
File file = new File(getTempFilename());
file.delete();
}
private void copyWaveFile(String inFilename, String outFileName1, String outFileName2) {
FileInputStream in = null;
FileOutputStream out1 = null, out2 = null;
long totalAudioLen = 0;
long totalDataLen = totalAudioLen + 36;
long longSampleRate = RECORDER_SAMPLERATE;
int channels = 2;
long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels / 8;
byte[] data = new byte[bufferSize];
try {
in = new FileInputStream(inFilename);
out1 = new FileOutputStream(outFileName1);
out2 = new FileOutputStream(outFileName2);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
WriteWaveFileHeader(out1, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
WriteWaveFileHeader(out2, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
int read;
while ((read = in.read(data)) != -1) {
out1.write(data, 0, read); // Writing Non-Gained Data
float rGain = 2.5f;
for (int i = 0; i < read / 2; i++) {
short curSample = getShort(data[i * 2], data[i * 2 + 1]);
if (rGain != 1) {
// apply gain, clamping to the 16-bit range to avoid wrap-around distortion
int gained = (int) (curSample * rGain);
if (gained > Short.MAX_VALUE) gained = Short.MAX_VALUE;
if (gained < Short.MIN_VALUE) gained = Short.MIN_VALUE;
curSample = (short) gained;
// convert back from short sample that was "gained" to
// byte data
byte[] a = getByteFromShort(curSample);
// modify buffer to contain the gained sample
data[i * 2] = a[0];
data[i * 2 + 1] = a[1];
}
}
out2.write(data, 0, read); // Writing Gained Data
}
out1.close();
out2.close();
in.close();
Toast.makeText(this, "Done!!", Toast.LENGTH_LONG).show();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
private short getShort(byte argB1, byte argB2) {
return (short)((argB1 & 0xff) | (argB2 << 8));
}
private byte[] getByteFromShort(short x) {
// variant 1 - noise
byte[] a = new byte[2];
a[0] = (byte)(x & 0xff);
a[1] = (byte)((x >> 8) & 0xff);
// variant 2 - noise and almost broke my ears - very loud
// ByteBuffer buffer = ByteBuffer.allocate(2);
// buffer.putShort(x);
// buffer.flip();
return a;
}
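// Quick round-trip check (illustrative only): getShort and getByteFromShort are inverses.
//   short s = getShort((byte) 0x34, (byte) 0x12); // little-endian pair -> 0x1234
//   byte[] back = getByteFromShort(s);            // -> { 0x34, 0x12 }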
private void WriteWaveFileHeader(FileOutputStream out, long totalAudioLen, long totalDataLen, long longSampleRate, int channels, long byteRate)
throws IOException {
byte[] header = new byte[44];
header[0] = 'R';
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte)(totalDataLen & 0xff);
header[5] = (byte)((totalDataLen >> 8) & 0xff);
header[6] = (byte)((totalDataLen >> 16) & 0xff);
header[7] = (byte)((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f';
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16;
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1;
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte)(longSampleRate & 0xff);
header[25] = (byte)((longSampleRate >> 8) & 0xff);
header[26] = (byte)((longSampleRate >> 16) & 0xff);
header[27] = (byte)((longSampleRate >> 24) & 0xff);
header[28] = (byte)(byteRate & 0xff);
header[29] = (byte)((byteRate >> 8) & 0xff);
header[30] = (byte)((byteRate >> 16) & 0xff);
header[31] = (byte)((byteRate >> 24) & 0xff);
header[32] = (byte)(2 * 16 / 8);
header[33] = 0;
header[34] = RECORDER_BPP;
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte)(totalAudioLen & 0xff);
header[41] = (byte)((totalAudioLen >> 8) & 0xff);
header[42] = (byte)((totalAudioLen >> 16) & 0xff);
header[43] = (byte)((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
}
I would like to know if I need to add anything extra to make my AudioRecord code work reliably on Samsung devices.
We're also struggling with audio recording on some Samsung Android Devices. Unfortunately it seems to be very broken, as even different revisions of the same phone model are behaving differently with the same codebase.
Here are my current findings, hoping you find something useful:
1. Broken Initialization:
Unfortunately, the strategy you are using to query for valid recording configurations will fail at least on Samsung Galaxy Young and Ace models running Android 2.3.
The problem is that some invalid AudioRecord configurations, instead of simply failing, will completely brick the audio capture subsystem if tried. You'll need to reset the phone to recover from this state.
2. Inconsistent Sampling-Rate support along revisions of same phone model
On an older Galaxy Ace phone, recording at 11025 Hz, 16-bit mono will succeed. On newer Ace revisions, this AudioRecord configuration will be accepted as valid, but the resulting recording is distorted, with a "chipmunk" effect. A very popular guitar tuner app that has hardcoded this sampling rate fails to give proper tuning readings on these phones precisely because of this problem!
3. Extremely low volume audio capture on some configurations.
On the Galaxy Young and Galaxy Ace, recording from the mic or default audio source at 44,100 Hz (the supposedly canonical rate where everything should work fine) produces an undistorted but extremely low-volume recording. I haven't yet found a way to fix this other than software amplification (which is the equivalent of magnifying a very low-res image, with the consequent jaggedness of the result).
4. Failure to support the canonical 44,100Hz sampling rate on every audio capture source.
On the Galaxy Young and Galaxy Ace, recording from the Camcorder source fails at 44,100 Hz (again, the configuration will be accepted as valid), producing complete garbage. However, recording at 8,000 Hz, 16,000 Hz and 48,000 Hz works fine and produces a recording with very acceptable volume levels. What is frustrating is that according to the Android documentation, 44,100 Hz is a sampling rate all devices SHOULD support.
5. OpenSL does not fix any of the problems reported.
Working with the NDK and OpenSL produces the same results as described. It seems that the AudioRecord class is simply wrapping calls to OpenSL, and the problem is either hardware-based or buried at a lower level in the kernel code.
This situation is very unfortunate indeed, as these models are becoming very popular, at least in Mexico.
Good luck - and please report if you had better luck working with these phones.
=)
Audio gain control: to increase the amplitude of the audio you need to calculate a gain factor and multiply every captured sample by it. The following code does that. P.S. Ignore the unrelated code.
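Stripped of the surrounding plumbing, the whole trick is one multiply per sample plus clamping; a minimal sketch (buffer, readSize and gainFactor are placeholder names, not the fields used below):
for (int i = 0; i < readSize; i++) {
    int v = (int) (buffer[i] * gainFactor);                    // scale the 16-bit sample
    buffer[i] = (short) Math.max(-32768, Math.min(32767, v)); // clamp to the short range
}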
public class MainActivity extends Activity {
public static final int SAMPLE_RATE = 16000;
private AudioRecord mRecorder;
private File mRecording;
private short[] mBuffer;
private final String startRecordingLabel = "Start recording";
private final String stopRecordingLabel = "Stop recording";
private boolean mIsRecording = false;
private ProgressBar mProgressBar;
float iGain = 1.0f;
CheckBox gain;
protected int bitsPerSamples = 16;
@Override
public void onCreate(final Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.fragment_main);
initRecorder();
Button bluetooth = (Button)findViewById(R.id.blue);
gain = (CheckBox) findViewById(R.id.checkBox1);
mProgressBar = (ProgressBar) findViewById(R.id.progressBar);
final Button button = (Button) findViewById(R.id.start);
button.setText(startRecordingLabel);
bluetooth.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
// TODO Auto-generated method stub
Intent i = new Intent("");
}
});
gain.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView,
boolean isChecked) {
if (gain.isChecked()) {
iGain = 5.0f;
} else {
iGain = 2.0f;
}
}
});
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(final View v) {
if (!mIsRecording) {
button.setText(stopRecordingLabel);
mIsRecording = true;
mRecorder.startRecording();
mRecording = getFile("raw");
startBufferedWrite(mRecording);
} else {
button.setText(startRecordingLabel);
mIsRecording = false;
mRecorder.stop();
File waveFile = getFile("wav");
try {
rawToWave(mRecording, waveFile);
} catch (IOException e) {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
}
Toast.makeText(MainActivity.this,
"Recorded to " + waveFile.getName(),
Toast.LENGTH_SHORT).show();
}
}
});
}
@Override
public void onDestroy() {
mRecorder.release();
super.onDestroy();
}
private void initRecorder() {
int bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
mBuffer = new short[bufferSize];
mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT,
bufferSize);
}
private void startBufferedWrite(final File file) {
new Thread(new Runnable() {
@Override
public void run() {
DataOutputStream output = null;
try {
output = new DataOutputStream(new BufferedOutputStream(
new FileOutputStream(file)));
while (mIsRecording) {
double sum = 0;
int readSize = mRecorder.read(mBuffer, 0,
mBuffer.length);
for (int frameIndex = 0; frameIndex < readSize; frameIndex++) {
if (iGain != 1) {
// apply the gain to each 16-bit sample, clamping to avoid wrap-around distortion
int gained = (int) (mBuffer[frameIndex] * iGain);
if (gained > 32767) {
gained = 32767;
} else if (gained < -32768) {
gained = -32768;
}
mBuffer[frameIndex] = (short) gained;
}
output.writeShort(mBuffer[frameIndex]);
sum += mBuffer[frameIndex] * mBuffer[frameIndex];
}
if (readSize > 0) {
final double amplitude = sum / readSize;
mProgressBar.setProgress((int) Math.sqrt(amplitude));
}
}
} catch (IOException e) {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
} finally {
mProgressBar.setProgress(0);
if (output != null) {
try {
output.flush();
} catch (IOException e) {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
} finally {
try {
output.close();
} catch (IOException e) {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
}
}
}
}
}
}).start();
}
private void rawToWave(final File rawFile, final File waveFile)
throws IOException {
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.readFully(rawData); // read the whole file; a plain read() may return fewer bytes
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, SAMPLE_RATE); // sample rate
writeInt(output, SAMPLE_RATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN)
.asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
// Apply Gain
/*
* s *= iGain; if(s>32767) { s=32767; } else if(s<-32768) {
* s=-32768; }
*/
bytes.putShort(s);
}
output.write(bytes.array());
} finally {
if (output != null) {
output.close();
}
}
}
private File getFile(final String suffix) {
Time time = new Time();
time.setToNow();
return new File(Environment.getExternalStorageDirectory(),
time.format("%Y%m%d%H%M%S") + "." + suffix);
}
private void writeInt(final DataOutputStream output, final int value)
throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value)
throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value)
throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
}