I'm trying to generate a wav file with ulaw encoding from raw pcm data.
I have tried some of the solutions I found on Google and here, but I can't seem to make the audio nicely playable. It's noisy, with a sort of screeching sound. Maybe the audio is playing too fast, I'm not sure.
So, here is the code that I have tried so far.
I have my AudioRecorder.java that handles recording and saving the data as a raw PCM file, converting the raw PCM data to ulaw encoded data, creating the WAV header, and then combining both streams of data and saving them as one .wav file.
public class AudioRecorder {
private static final String TAG = "AudioRecorder";
private int audioInput = MediaRecorder.AudioSource.MIC;
private int audioSampleRate = 8000; // sample rate, which ranges from 8 kHz to 44.1 kHz
private int audioChannel = AudioFormat.CHANNEL_IN_MONO;
private int audioEncode = AudioFormat.ENCODING_PCM_16BIT;
private int bufferSizeInBytes = 0;
private AudioRecord audioRecord;
private Status status = Status.STATUS_NO_READY;
protected String pcmFileName;
private int currentPosition = 0;
private int lastVolumn = 0;
private FileOutputStream fosPcm = null;
public AudioRecorder() {
pcmFileName = AudioFileUtils.getPcmFileAbsolutePath(RECORDED_FILE_NAME);
status = Status.STATUS_READY;
}
public void setAudioInput(int audioInput) {
this.audioInput = audioInput;
}
public void setAudioSampleRate(int audioSampleRate) {
this.audioSampleRate = audioSampleRate;
}
public void setAudioChannel(int audioChannel) {
this.audioChannel = audioChannel;
}
/**
* This method is to start recording using AudioRecord, also has NoiseSuppressor and AutomaticGainControl enabled
*/
public void startRecord() {
bufferSizeInBytes = AudioRecord.getMinBufferSize(audioSampleRate,
audioChannel, audioEncode);
audioRecord = new AudioRecord(audioInput, audioSampleRate, audioChannel, audioEncode, bufferSizeInBytes);
if (status == Status.STATUS_NO_READY) {
throw new IllegalStateException("not init");
}
if (status == Status.STATUS_START) {
throw new IllegalStateException("is recording ");
}
Log.d("AudioRecorder", "===startRecord===" + audioRecord.getState());
audioRecord.startRecording();
new Thread(new Runnable() {
@Override
public void run() {
NoiseSuppressor noiseSuppressor = NoiseSuppressor.create(audioRecord.getAudioSessionId());
if (noiseSuppressor != null) {
noiseSuppressor.setEnabled(true);
}
AutomaticGainControl automaticGainControl = AutomaticGainControl.create(audioRecord.getAudioSessionId());
if (automaticGainControl != null) {
automaticGainControl.setEnabled(true);
}
recordToFile();
}
}).start();
}
public void stop() {
if (status != Status.STATUS_START) {
throw new IllegalStateException("not recording");
} else {
stopRecorder();
// convertPCmFile();
makeDestFile();
status = Status.STATUS_READY;
}
}
private void convertPCmFile() {
File file = new File(AudioFileUtils.getPcmFileAbsolutePath(RECORDED_GREETING_FILE_NAME)); // for ex. path= "/sdcard/samplesound.pcm" or "/sdcard/samplesound.wav"
byte[] byteData = new byte[(int) file.length()];
try {
FileInputStream in = new FileInputStream(file);
in.read(byteData);
in.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
// Set and push to audio track..
int intSize = android.media.AudioTrack.getMinBufferSize(audioSampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT);
AudioTrack at = new AudioTrack(AudioManager.STREAM_MUSIC, audioSampleRate, AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT, intSize, AudioTrack.MODE_STREAM);
if (at != null) {
at.play();
// Write the byte array to the track
at.write(byteData, 0, byteData.length);
at.stop();
at.release();
} else
Log.d("TCAudio", "audio track is not initialised ");
}
private void makeDestFile() {
new Thread() {
@Override
public void run() {
File file = new File(pcmFileName);
try {
//Step 1: create input stream from generated pcm audio data
byte[] pcmData = new byte[(int) file.length()];
FileInputStream inputStream1 = new FileInputStream(file);
int readBytes = inputStream1.read(pcmData);
inputStream1.close();
//Step 2: compute the maximum absolute PCM sample value; the encoder uses it for scaling (the length argument is the number of 16-bit samples, i.e. half the byte count)
int maxAbs = UlawEncoderInputStream.maxAbsPcm(pcmData, 0, pcmData.length / 2);
//Step 3: create the encoder around a fresh input stream, passing that max value
FileInputStream inputStream2 = new FileInputStream(file);
UlawEncoderInputStream ulawEncoderInputStream = new UlawEncoderInputStream(inputStream2, maxAbs);
//Step 4: create a byte[] half the size of the PCM data (one ulaw byte per 16-bit sample)
byte[] ulawData = new byte[pcmData.length / 2];
//Step 5: read the encoded data from UlawEncoderInputStream into ulawData
int nRead;
nRead = ulawEncoderInputStream.read(ulawData);
//Step 6: create wav header
byte[] wavHeader = wavFileHeader(ulawData.length, ulawData.length + 36, audioSampleRate, audioChannel, audioSampleRate);
//Step 7: combine wav header and encodedUlawBuffer in one byte[]
byte[] allByteArray = new byte[wavHeader.length + ulawData.length];
ByteBuffer buff = ByteBuffer.wrap(allByteArray);
buff.put(wavHeader);
buff.put(ulawData);
//Step 8 : writing the combined data into a new file
OutputStream outputStream = new FileOutputStream(new File(AudioFileUtils.getWavFileAbsolutePath(RECORDED_FILE_NAME)));
outputStream.write(allByteArray);
outputStream.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
releaseRecorder();
}
}.start(); // start(), not run(), so the file work happens off the calling thread
}
private byte[] wavFileHeader(long totalAudioLen, long totalDataLen, long longSampleRate,
int channels, long byteRate) {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 7; // format = 7, for ulaw
header[21] = 0;
header[22] = (byte) (channels & 0xff);
header[23] = (byte) ((channels >> 8) & 0xFF);
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) ((channels * 8) / 8); // block align
header[33] = 0;
header[34] = 8; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
return header;
}
public void release() {
stopRecorder();
releaseRecorder();
status = Status.STATUS_READY;
}
private void releaseRecorder() {
if (audioRecord != null) {
audioRecord.release();
audioRecord = null;
}
}
private void stopRecorder() {
if (audioRecord != null) {
try {
audioRecord.stop();
} catch (Exception e) {
e.printStackTrace();
}
}
}
/**
* linear PCM data recorded to file
*/
private void recordToFile() {
byte[] audiodata = new byte[bufferSizeInBytes];
int readsize = 0;
try {
fosPcm = new FileOutputStream(pcmFileName, true);
} catch (FileNotFoundException e) {
Log.e("AudioRecorder", e.getMessage());
}
status = Status.STATUS_START;
while (status == Status.STATUS_START && audioRecord != null) {
readsize = audioRecord.read(audiodata, 0, bufferSizeInBytes);
if (AudioRecord.ERROR_INVALID_OPERATION != readsize && fosPcm != null) {
try {
//get the volume 1--10
int sum = 0;
for (int i = 0; i < readsize; i++) {
sum += Math.abs(audiodata[i]);
}
if (readsize > 0) {
int raw = sum / readsize;
lastVolumn = raw > 32 ? raw - 32 : 0;
Log.i(TAG, "writeDataTOFile: volumn -- " + raw + " / lastvolumn -- " + lastVolumn);
}
if (readsize > 0 && readsize <= audiodata.length)
fosPcm.write(audiodata, 0, readsize);
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
}
}
try {
if (fosPcm != null) {
fosPcm.close();
}
} catch (IOException e) {
Log.e("AudioRecorder", e.getMessage());
}
}
public Status getStatus() {
return status;
}
public enum Status {
STATUS_NO_READY,
STATUS_READY,
STATUS_START,
STATUS_PAUSE,
STATUS_STOP
}
}
I have AudioFileUtils.java, which just builds the paths and appends the extension when saving:
public class AudioFileUtils {
private static String rootPath = "audiorecord";
private final static String AUDIO_PCM_BASEPATH = "/" + rootPath + "/pcm/";
private final static String AUDIO_WAV_BASEPATH = "/" + rootPath + "/wav/";
private static void setRootPath(String rootPath) {
AudioFileUtils.rootPath = rootPath;
}
public static String getPcmFileAbsolutePath(String fileName) {
if (TextUtils.isEmpty(fileName)) {
throw new NullPointerException("fileName isEmpty");
}
String mAudioRawPath = "";
if (!fileName.endsWith(".pcm")) {
fileName = fileName + ".pcm";
}
String fileBasePath = SampleApp.getInstance().getApplicationContext().getExternalFilesDir(null) + AUDIO_PCM_BASEPATH;
File file = new File(fileBasePath);
if (!file.exists()) {
file.mkdirs();
}
mAudioRawPath = fileBasePath + fileName;
return mAudioRawPath;
}
public static String getWavFileAbsolutePath(String fileName) {
if (fileName == null) {
throw new NullPointerException("fileName can't be null");
}
String mAudioWavPath = "";
if (!fileName.endsWith(".wav")) {
fileName = fileName + ".wav";
}
String fileBasePath = SampleApp.getInstance().getApplicationContext().getExternalFilesDir(null) + AUDIO_WAV_BASEPATH;
CoxApplication.getInstance().getAccountManager().setMessageFilePath(fileBasePath);
File file = new File(fileBasePath);
if (!file.exists()) {
file.mkdirs();
}
mAudioWavPath = fileBasePath + fileName;
return mAudioWavPath;
}
}
I am using UlawEncoderInputStream.java, which I found here with some changes to it; it can be found below.
public class UlawEncoderInputStream extends InputStream {
private final static String TAG = "UlawEncoderInputStream";
private final static int MAX_ULAW = 8192;
private final static int SCALE_BITS = 16;
private InputStream mIn;
private int mMax = 0;
// this buffer needs to be LARGER than the largest possible file size for
// a 30 second PCM file recorded at either 8000 Hz or 44.1 KHz.
// If it is smaller, the file will be cut off.
private final byte[] mBuf = new byte[1048576];
private int mBufCount = 0; // should be 0 or 1
private final byte[] mOneByte = new byte[1];
public static void encode(byte[] pcmBuf, int pcmOffset,
byte[] ulawBuf, int ulawOffset, int length, int max) {
// from 'ulaw' in wikipedia
// +8191 to +8159 0x80
// +8158 to +4063 in 16 intervals of 256 0x80 + interval number
// +4062 to +2015 in 16 intervals of 128 0x90 + interval number
// +2014 to +991 in 16 intervals of 64 0xA0 + interval number
// +990 to +479 in 16 intervals of 32 0xB0 + interval number
// +478 to +223 in 16 intervals of 16 0xC0 + interval number
// +222 to +95 in 16 intervals of 8 0xD0 + interval number
// +94 to +31 in 16 intervals of 4 0xE0 + interval number
// +30 to +1 in 15 intervals of 2 0xF0 + interval number
// 0 0xFF
// -1 0x7F
// -31 to -2 in 15 intervals of 2 0x70 + interval number
// -95 to -32 in 16 intervals of 4 0x60 + interval number
// -223 to -96 in 16 intervals of 8 0x50 + interval number
// -479 to -224 in 16 intervals of 16 0x40 + interval number
// -991 to -480 in 16 intervals of 32 0x30 + interval number
// -2015 to -992 in 16 intervals of 64 0x20 + interval number
// -4063 to -2016 in 16 intervals of 128 0x10 + interval number
// -8159 to -4064 in 16 intervals of 256 0x00 + interval number
// -8192 to -8160 0x00
// set scale factors
if (max <= 0) max = MAX_ULAW;
int coef = MAX_ULAW * (1 << SCALE_BITS) / max;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[pcmOffset++]) + (pcmBuf[pcmOffset++] << 8);
pcm = (pcm * coef) >> SCALE_BITS;
int ulaw;
if (pcm >= 0) {
ulaw = pcm <= 0 ? 0xff :
pcm <= 30 ? 0xf0 + (( 30 - pcm) >> 1) :
pcm <= 94 ? 0xe0 + (( 94 - pcm) >> 2) :
pcm <= 222 ? 0xd0 + (( 222 - pcm) >> 3) :
pcm <= 478 ? 0xc0 + (( 478 - pcm) >> 4) :
pcm <= 990 ? 0xb0 + (( 990 - pcm) >> 5) :
pcm <= 2014 ? 0xa0 + ((2014 - pcm) >> 6) :
pcm <= 4062 ? 0x90 + ((4062 - pcm) >> 7) :
pcm <= 8158 ? 0x80 + ((8158 - pcm) >> 8) :
0x80;
} else {
ulaw = -1 <= pcm ? 0x7f :
-31 <= pcm ? 0x70 + ((pcm - -31) >> 1) :
-95 <= pcm ? 0x60 + ((pcm - -95) >> 2) :
-223 <= pcm ? 0x50 + ((pcm - -223) >> 3) :
-479 <= pcm ? 0x40 + ((pcm - -479) >> 4) :
-991 <= pcm ? 0x30 + ((pcm - -991) >> 5) :
-2015 <= pcm ? 0x20 + ((pcm - -2015) >> 6) :
-4063 <= pcm ? 0x10 + ((pcm - -4063) >> 7) :
-8159 <= pcm ? 0x00 + ((pcm - -8159) >> 8) :
0x00;
}
ulawBuf[ulawOffset++] = (byte)ulaw;
}
}
/**
* Compute the maximum of the absolute value of the pcm samples.
* The return value can be used to set ulaw encoder scaling.
* @param pcmBuf array containing 16 bit pcm data.
* @param offset offset of start of 16 bit pcm data.
* @param length number of pcm samples (not number of input bytes)
* @return maximum abs of pcm data values
*/
public static int maxAbsPcm(byte[] pcmBuf, int offset, int length) {
int max = 0;
for (int i = 0; i < length; i++) {
int pcm = (0xff & pcmBuf[offset++]) + (pcmBuf[offset++] << 8);
if (pcm < 0) pcm = -pcm;
if (pcm > max) max = pcm;
}
return max;
}
/**
* Create an InputStream which takes 16 bit pcm data and produces ulaw data.
* @param in InputStream containing 16 bit pcm data.
* @param max pcm value corresponding to maximum ulaw value.
*/
public UlawEncoderInputStream(InputStream in, int max) {
mIn = in;
mMax = max;
}
@Override
public int read(byte[] buf, int offset, int length) throws IOException {
if (mIn == null) throw new IllegalStateException("not open");
// return at least one byte, but try to fill 'length'
while (mBufCount < 2) {
int n = mIn.read(mBuf, mBufCount, Math.min(length * 2, mBuf.length - mBufCount));
if (n == -1) return -1;
mBufCount += n;
}
// compand data
int n = Math.min(mBufCount / 2, length);
encode(mBuf, 0, buf, offset, n, mMax);
// move data to bottom of mBuf
mBufCount -= n * 2;
for (int i = 0; i < mBufCount; i++) mBuf[i] = mBuf[i + n * 2];
return n;
}
/*public byte[] getUpdatedBuffer(){
return mBuf;
}*/
@Override
public int read(byte[] buf) throws IOException {
return read(buf, 0, buf.length);
}
@Override
public int read() throws IOException {
int n = read(mOneByte, 0, 1);
if (n == -1) return -1;
return 0xff & (int)mOneByte[0];
}
@Override
public void close() throws IOException {
if (mIn != null) {
InputStream in = mIn;
mIn = null;
in.close();
}
}
@Override
public int available() throws IOException {
return (mIn.available() + mBufCount) / 2;
}
}
After doing all of this, as I said, the voice is too screeching and noisy. I'm not sure where I am going wrong.
Any help is appreciated! Thank you in advance.
Related
I'm working on an Android Wear app. This app records local audio from the smartwatch's microphone and sends it to the handheld device. The handheld device receives the data and writes a .wav file. The file is created, but when I listen to it I hear only silence.
this is the wear code:
public void replyAudioByByte(final byte data[]) {
final String path = "/start_activity";
final Byte[] text= new Byte[1024];
GoogleApiClient client = new GoogleApiClient.Builder(getApplicationContext())
.addApi(Wearable.API)
.build();
new Thread(new Runnable() {
@Override
public void run() {
NodeApi.GetConnectedNodesResult nodes = Wearable.NodeApi.getConnectedNodes(mApiClient).await();
for (Node node : nodes.getNodes()) {
MessageApi.SendMessageResult result = Wearable.MessageApi.sendMessage(
mApiClient, node.getId(),AUDIO_RECORDER, data).await();
if (result.getStatus().isSuccess()) {
Log.d("sendMessage","Message send!!");
for (int j=0; j<data.length; j++ ){
Log.v("Mobile", "Message: {" + data[j] + "} sent to: " + node.getDisplayName());
}
} else {
// Log an error
Log.v("Mobile", "ERROR: failed to send Message");
}
}
}
}).start();
client.disconnect();
Log.d("MOBILE", "send message end");
}
public void startRecordingAudio() {
recorder = findAudioRecord();
Log.d("recorder:","recorder="+recorder.toString());
CountDownTimer countDowntimer = new CountDownTimer(8000, 1000) {
public void onTick(long millisUntilFinished) {
}
public void onFinish() {
try {
//Toast.makeText(getBaseContext(), "Stop recording Automatically ", Toast.LENGTH_LONG).show();
Log.d("wear", "stopRecorder=" + System.currentTimeMillis());
recorder.stop();
Log.d("formato registrazione","recorderformat="+recorder.getAudioFormat()+"-----rate=");
Log.d("formato registrazione","recordersamplerate=" +recorder.getSampleRate());
isRecording=false;
replyAudioByByte(data);
for (int j=0; j< data.length;j++){
Log.d("watch audio registrato", "data[]="+data[j]);
}
Log.d("wear", "recorder.stop ok!");
} catch (Exception e) {
// TODO Auto-generated catch block
Log.e("wear", "recorder.stop catch");
e.printStackTrace();
}
}
};
recorder.startRecording();
countDowntimer.start();
Log.d("wear", "startRecorder=" + System.currentTimeMillis());
isRecording = true;
recordingThread = new Thread(new Runnable() {
public void run() {
while (isRecording ) {
recorder.read(data, 0, bufferSize);
Log.d("WEAR","recorder.read="+recorder.read(data, 0, bufferSize));
}
recorder.stop();
recorder.release();
for (int i = 0; i < bufferSize; i++) {
Log.d("startrecording", "data=" + data[i]);
}
}
}, "AudioRecorder Thread");
recordingThread.start();
int a= recorder.getSampleRate();
Log.d("formato registrazione","recorderformat="+recorder.getAudioFormat()+"-----rate="+a);
Log.d("formato registrazione","recordersamplerate=" +recorder.getSampleRate());
}
public AudioRecord findAudioRecord() {
/** The settings I must use are not the same on every device, so I try candidates until one works */
for (int rate : mSampleRates) {
for (short audioFormat : audioF) {
for (short channelConfig : channelC) {
try {
//Log.d("Check", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
//It checks if it can instantiate the audiorecorder without problems
AudioRecord recorder = new AudioRecord(AudioSource.MIC, rate, channelConfig, audioFormat, bufferSize + 2000);
Log.d("AudioRecorder data","AudioSource.Default="+ AudioSource.MIC);
Log.d("AudioRecorder data","Rate="+ rate);
Log.d("AudioRecorder data","Channel.config="+ channelConfig);
Log.d("AudioRecorder data","AudioFormat= "+audioFormat);
bufferSize=bufferSize+2000;
Log.d("AudioRecorder data","buffersize="+ bufferSize );
if (recorder.getState() == AudioRecord.STATE_INITIALIZED) {
Log.d("audiorec","rate="+rate);
return recorder;
}
}
} catch (Exception e) {
Log.e("Check", rate + "Exception, keep trying.", e);
}
}
}
}
return null;
}
this is the handheld code:
public Void doInBackground(byte [] dataToWrite) {
Log.d("doInBackground","entrato");
byte data[] = new byte[bufferSize];
String tempfilename = "";
FileOutputStream os = null;
//if(allowRecorder){
tempfilename = getTempFilename();
Log.d("doInBackground","getTempFilename=" +tempfilename.toString());
try {
os = new FileOutputStream(tempfilename);
Log.d("doInBackground","os new ok" );
} catch (FileNotFoundException e) {
e.printStackTrace();
}
dbData = new ArrayList<Double>();
Log.d("doInBackGround", "dateToWrite.length=" + dataToWrite.length);
for (int j = 0; j < dataToWrite.length; j++) {
try {
os.write(dataToWrite);
Log.d("os,write", "dataToWrite");
} catch (IOException e) {
e.printStackTrace();
}
}
if(data[data.length-1]!=0){
double Db = 20 * Math.log10(Math.abs((data[data.length-1]/51805.5336) / 0.00002));
dbData.add(Db);
}
try {
os.close();
Log.d("os.close", "dataToWrite");
copyWaveFile(tempfilename,getFilename());
deleteTempFile();
} catch (IOException e) {
e.printStackTrace();
}
return null;
}
private void copyWaveFile(String inFilename,String outFilename){
FileInputStream in = null;
FileOutputStream out = null;
long totalAudioLen = 0;
long totalDataLen = 0;
long longSampleRate = 8000;
System.out.println("SAMPLE RATE = "+longSampleRate);
int channels = 12;
audioFormat = 16;
long byteRate = audioFormat * longSampleRate * channels/8;
byte[] data = new byte[bufferSize];
try {
in = new FileInputStream(inFilename);
out = new FileOutputStream(outFilename);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
Log.d("RecorderRead","totalAudioLen=" +totalAudioLen);
Log.d("RecorderRead","totalDatalen=" +totalDataLen);
System.out.println("Temp File size: " + totalDataLen);
Log.d("AudioRecorder data","AudioSource.Default="+ AudioSource.DEFAULT);
Log.d("AudioRecorder data","Rate="+ longSampleRate);
Log.d("AudioRecorder data","Channel.config="+ channels);
Log.d("AudioRecorder data","AudioFormat= "+audioFormat);
//bufferSize=bufferSize+2000;
Log.d("AudioRecorder data","buffersize="+ bufferSize );
if(totalDataLen != 36){
writeWaveFileHeader(out, totalAudioLen, totalDataLen,
longSampleRate, channels, byteRate);
Log.d("writeWAVEFILE", "chiamato");
while(in.read(data) != -1){
out.write(data);
}
System.out.println("Wav File size: " + out.getChannel().size());
}
else{
System.out.println("Non creo il file .wav");
}
in.close();
out.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
private void writeWaveFileHeader(
FileOutputStream out, long totalAudioLen,
long totalDataLen, long longSampleRate, int channels,
long byteRate) throws IOException {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1; // format = 1
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) (2 * 16 / 8); // block align
header[33] = 0;
header[34] = (byte) audioFormat; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
In the wear manifest I have:
<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.MODIFY_AUDIO_SETTINGS" />
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
In the handheld manifest I have:
<uses-permission android:name="android.permission.WRITE_EXTERNAL_STORAGE" />
When I run the app, I get this error in the wear app's logfile:
AudioRecord-JNI: Error -4 during AudioRecord native read
What should I do to solve it?
Can someone help me? What's wrong? Any kind of help is appreciated, code or a tutorial.
Thanks in advance
You need to set the audio encoder to be used for recording.
void setAudioEncoder (int audio_encoder)
If this method is not called, the output file will not contain an audio track. Call this after setOutputFormat() but before prepare().
Try to follow the steps listed below:
Create a new instance of android.media.MediaRecorder.
Set the audio source using MediaRecorder.setAudioSource(). You will probably want to use MediaRecorder.AudioSource.MIC.
Set output file format using MediaRecorder.setOutputFormat().
Set output file name using MediaRecorder.setOutputFile().
Set the audio encoder using MediaRecorder.setAudioEncoder().
Call MediaRecorder.prepare() on the MediaRecorder instance.
To start audio capture, call MediaRecorder.start().
To stop audio capture, call MediaRecorder.stop().
When you are done with the MediaRecorder instance, call MediaRecorder.release() on it. Calling MediaRecorder.release() is always recommended to free the resource immediately.
Here's sample code showing how to record audio and play the recorded audio back: https://developer.android.com/guide/topics/media/audio-capture.html#example
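To make the ordering concrete, here is a minimal sketch of that sequence (the 3GP/AMR format choices and the helper name are placeholders; adjust them to your app):
void recordTo(String outputPath) throws IOException { // hypothetical helper
MediaRecorder recorder = new MediaRecorder();
recorder.setAudioSource(MediaRecorder.AudioSource.MIC);
recorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
recorder.setOutputFile(outputPath);
recorder.setAudioEncoder(MediaRecorder.AudioEncoder.AMR_NB); // after setOutputFormat(), before prepare()
recorder.prepare();
recorder.start();
// ... later: recorder.stop(); recorder.release();
}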
I recorded audio using the audio recorder on Android and it produced a raw PCM file. I'm trying to convert it to a format I can listen to (WAV or MP3, for example).
I've started with this example but don't know where to go from here: Android AudioRecord example
I tried following these:
http://computermusicblog.com/blog/2008/08/29/reading-and-writing-wav-files-in-java
Recording .Wav with Android AudioRecorder
Here is my code to record (note I am using a CountDownTimer to tell it when to start and stop recording):
public class AudioRecordService extends Service {
Toast toast;
private static final int RECORDER_SAMPLERATE = 44100;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private AudioRecord record = null;
int BufferElements2Rec = 1024; // we want 2048 (2K) bytes; since each element is 2 bytes, we use 1024 elements
int BytesPerElement = 2; // 2 bytes in 16bit format
private Thread recordingThread = null;
private boolean isRecording = false;
int buffsize = 0;
public AudioRecordService() {
}
@Override
public IBinder onBind(Intent intent) {
// TODO: Return the communication channel to the service.
throw new UnsupportedOperationException("Not yet implemented");
}
public int onStartCommand(Intent intent, int flags, int startId)
{
try {
buffsize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
record = new AudioRecord(MediaRecorder.AudioSource.MIC,
RECORDER_SAMPLERATE, RECORDER_CHANNELS,
RECORDER_AUDIO_ENCODING, buffsize);
record.startRecording();
CountDownTimer countDowntimer = new CountDownTimer(15000, 1000) {
public void onTick(long millisUntilFinished) {
toast = Toast.makeText(AudioRecordService.this, "Recording", Toast.LENGTH_SHORT);
toast.show();
isRecording = true;
recordingThread = new Thread(new Runnable() {
public void run() {
writeAudioDataToFile();
}
}, "AudioRecorder Thread");
recordingThread.start();
}
public void onFinish() {
try {
toast.cancel();
Toast.makeText(AudioRecordService.this, "Done Recording ", Toast.LENGTH_SHORT).show();
isRecording = false;
record.stop();
record.release();
record = null;
recordingThread = null;
} catch (Exception e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
}};
countDowntimer.start();
}
catch (Exception ex)
{
ex.printStackTrace();
}
return Service.START_STICKY;
}
private byte[] short2byte(short[] sData) {
int shortArrsize = sData.length;
byte[] bytes = new byte[shortArrsize * 2];
for (int i = 0; i < shortArrsize; i++) {
bytes[i * 2] = (byte) (sData[i] & 0x00FF);
bytes[(i * 2) + 1] = (byte) (sData[i] >> 8);
sData[i] = 0;
}
return bytes;
}
private void writeAudioDataToFile() {
try {
//String filePath = "/sdcard/voice8K16bitmono.pcm";
String extState = Environment.getExternalStorageState();
// Path to write files to
String path = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MUSIC + "/test").getAbsolutePath();
String fileName = "audio.pcm";
String externalStorage = Environment.getExternalStorageDirectory().getAbsolutePath();
File file = new File(externalStorage + File.separator + fileName);
// if the file doesn't exist, create it
if (!file.exists()) {
file.createNewFile();
}
short sData[] = new short[BufferElements2Rec];
FileOutputStream os = null;
try {
os = new FileOutputStream(file);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
while (isRecording) {
// gets the voice output from microphone to byte format
record.read(sData, 0, BufferElements2Rec);
System.out.println("Short wirting to file" + sData.toString());
try {
// writes the data to the file from the buffer
// stores the voice buffer
byte bData[] = short2byte(sData);
os.write(bData, 0, BufferElements2Rec * BytesPerElement);
} catch (IOException e) {
e.printStackTrace();
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
catch (Exception ex) {
ex.printStackTrace();
}
}
}
My audio.pcm is created. However, I don't know how to play it. I'm assuming bData[] is the byte array being written. The links I posted said they used the files, but showed no examples of how it was done.
If it matters, I've used GoldWave to open the file. It opens, but the audio is messed up.
I also noticed my files were 2 seconds long, and I think it's because of BytesPerElement and BufferElements2Rec. If you can help me out so it records the full 15 seconds, that would be great.
Thanks in advance!
The only difference between a PCM file and a WAV file is that the PCM file has no header and the WAV file does. The WAV header has key information for playback such as sample rate, number of bits per sample and number of channels. When you load a PCM file either the app has to have prior knowledge of this information or you have to tell it. If you load a PCM file into audacity, for example, it will prompt you to fill in all of that stuff.
In order to make the existing save file a .WAV you need to prepend an appropriate header. I'm not going to go into details about it because there are already many answers on SO detailing it and it is readily available on the web (https://en.wikipedia.org/wiki/WAV)
The second issue you raise about the file length might have something to do with the fact that AudioRecord.read returns an int, which is the number of samples actually read; it may be less than you asked for. That is really a second question, though.
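As a sketch of that second point, using the names from the question's code (record, sData, os, short2byte), write only the number of samples actually returned instead of assuming a full buffer:
int samplesRead = record.read(sData, 0, BufferElements2Rec);
if (samplesRead > 0) {
byte[] bData = short2byte(sData);
os.write(bData, 0, samplesRead * BytesPerElement); // only the bytes actually captured
}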
Here is a sample .WAV header format, extracted from OMRECORDER:
private byte[] wavFileHeader(long totalAudioLen, long totalDataLen, long longSampleRate,
int channels, long byteRate, byte bitsPerSample) {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1; // format = 1
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) (channels * (bitsPerSample / 8)); // block align
header[33] = 0;
header[34] = bitsPerSample; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
return header;
}
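A usage sketch for the header above (assuming 44100 Hz mono 16-bit PCM; the helper name and file arguments are placeholders): it simply writes the 44-byte header followed by the raw PCM bytes.
void pcmToWav(File pcm, File wav) throws IOException { // hypothetical helper
long audioLen = pcm.length();
long byteRate = 44100L * 1 * 16 / 8; // sampleRate * channels * bitsPerSample / 8
byte[] header = wavFileHeader(audioLen, audioLen + 36, 44100L, 1, byteRate, (byte) 16);
try (FileInputStream in = new FileInputStream(pcm);
FileOutputStream out = new FileOutputStream(wav)) {
out.write(header);
byte[] buf = new byte[4096];
int n;
while ((n = in.read(buf)) != -1) out.write(buf, 0, n);
}
}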
Below is the header format for .aac, extracted from WhatsappAudioRecorder:
private byte[] createAdtsHeader(int length) {
int frameLength = length + 7;
byte[] adtsHeader = new byte[7];
adtsHeader[0] = (byte) 0xFF; // Sync Word
adtsHeader[1] = (byte) 0xF1; // MPEG-4, Layer (0), No CRC
adtsHeader[2] = (byte) ((MediaCodecInfo.CodecProfileLevel.AACObjectLC - 1) << 6);
adtsHeader[2] |= (((byte) SAMPLE_RATE_INDEX) << 2);
adtsHeader[2] |= (((byte) CHANNELS) >> 2);
adtsHeader[3] = (byte) (((CHANNELS & 3) << 6) | ((frameLength >> 11) & 0x03));
adtsHeader[4] = (byte) ((frameLength >> 3) & 0xFF);
adtsHeader[5] = (byte) (((frameLength & 0x07) << 5) | 0x1f);
adtsHeader[6] = (byte) 0xFC;
return adtsHeader;
}
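Usage sketch: each raw AAC frame coming out of the encoder gets its own 7-byte ADTS header written immediately before it (out and frame here are assumptions, standing for the destination stream and one encoder output payload):
void writeAdtsFrame(OutputStream out, byte[] frame) throws IOException {
out.write(createAdtsHeader(frame.length)); // header encodes this frame's length
out.write(frame); // then the raw AAC payload
}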
I am recording audio with the AudioRecord class. Now I want to convert the raw audio file to *flac format. I convert the *raw file to wav this way:
private void copyWaveFile(String inFilename,String outFilename){
FileInputStream in = null;
FileOutputStream out = null;
long totalAudioLen = 0;
long totalDataLen = totalAudioLen + 36;
long longSampleRate = sampleRate;
int channels = 2;
long byteRate = RECORDER_BPP * sampleRate * channels/8;
byte[] data_pcm = new byte[mAudioBufferSize];
try {
in = new FileInputStream(inFilename);
out = new FileOutputStream(outFilename);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
Log.i(TAG,"File size: " + totalDataLen);
WriteWaveFileHeader(out, totalAudioLen, totalDataLen,
longSampleRate, channels, byteRate);
while(in.read(data_pcm) != -1){
out.write(data_pcm);
}
in.close();
out.close();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
This piece of code is responsible for the file header
private void WriteWaveFileHeader(
FileOutputStream out, long totalAudioLen,
long totalDataLen, long longSampleRate, int channels,
long byteRate) throws IOException {
byte[] header = new byte[44];
header[0] = 'R'; // RIFF/WAVE header
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte) (totalDataLen & 0xff);
header[5] = (byte) ((totalDataLen >> 8) & 0xff);
header[6] = (byte) ((totalDataLen >> 16) & 0xff);
header[7] = (byte) ((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f'; // 'fmt ' chunk
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16; // 4 bytes: size of 'fmt ' chunk
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1; // format = 1
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte) (longSampleRate & 0xff);
header[25] = (byte) ((longSampleRate >> 8) & 0xff);
header[26] = (byte) ((longSampleRate >> 16) & 0xff);
header[27] = (byte) ((longSampleRate >> 24) & 0xff);
header[28] = (byte) (byteRate & 0xff);
header[29] = (byte) ((byteRate >> 8) & 0xff);
header[30] = (byte) ((byteRate >> 16) & 0xff);
header[31] = (byte) ((byteRate >> 24) & 0xff);
header[32] = (byte) (2 * 16 / 8); // block align
header[33] = 0;
header[34] = RECORDER_BPP; // bits per sample
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte) (totalAudioLen & 0xff);
header[41] = (byte) ((totalAudioLen >> 8) & 0xff);
header[42] = (byte) ((totalAudioLen >> 16) & 0xff);
header[43] = (byte) ((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
I do not understand what the parameters of the *flac file should be.
You need an encoder to convert pcm data to flac format. You cannot just change the header and expect the content to work as flac.
Android (at least till 4.1) does not include a FLAC encoder, although there is a decoder supported from 3.1 onwards (Source: http://developer.android.com/guide/appendix/media-formats.html).
I do not have direct experience, but have seen people use ffmpeg as a flac encoder. This project audioboo-android, which contains the native libFLAC/libFLAC++ encoder, looks interesting.
So, from Android 4.1 you can do it like this:
Initializing:
MediaCodecList mcl = new MediaCodecList(MediaCodecList.REGULAR_CODECS);
MediaFormat format = new MediaFormat();
format.setString(MediaFormat.KEY_MIME, "audio/flac");
format.setInteger(MediaFormat.KEY_BIT_RATE, 64000);
format.setInteger(MediaFormat.KEY_SAMPLE_RATE, SAMPLE_RATE);
format.setInteger(MediaFormat.KEY_CHANNEL_COUNT, NUM_CHANNELS);
String codecname = mcl.findEncoderForFormat(format);
Log.w(TAG, "Codec: "+codecname);
MediaCodec codec = null;
try
{
codec = MediaCodec.createByCodecName(codecname);
} catch (IOException e)
{
e.printStackTrace();
}
codec.configure(format, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
MediaFormat outputFormat = codec.getOutputFormat(); // option B
long usec = 1000000L * FRAME_SIZE / SAMPLE_RATE; // frame duration in microseconds
MediaCodec.BufferInfo bufinfo = new MediaCodec.BufferInfo();
bufinfo.set(0, FRAME_SIZE * NUM_CHANNELS * 2, usec, 0);
codec.start();
byte[] inBuf = new byte[FRAME_SIZE * NUM_CHANNELS * 2];
byte[] encBuf = new byte[10240];
In the recorder loop:
int encoded = 0;
int inputBufferId = codec.dequeueInputBuffer(1000);
if (inputBufferId >= 0) {
ByteBuffer inputBuffer = codec.getInputBuffer(inputBufferId);
// fill inputBuffer with valid data
inputBuffer.put(inBuf, 0, inBuf.length);
codec.queueInputBuffer(inputBufferId, 0, inBuf.length, usec, 0);
}
int outputBufferId = codec.dequeueOutputBuffer(bufinfo, 1000);
if (outputBufferId >= 0) {
ByteBuffer outputBuffer = codec.getOutputBuffer(outputBufferId);
MediaFormat bufferFormat = codec.getOutputFormat(outputBufferId); // option A
// bufferFormat is identical to outputFormat
// outputBuffer is ready to be processed or rendered.
outputBuffer.rewind();
encoded = outputBuffer.remaining();
outputBuffer.get(encBuf, 0, encoded);
codec.releaseOutputBuffer(outputBufferId, false);
} else if (outputBufferId == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
// Subsequent data will conform to new format.
// Can ignore if using getOutputFormat(outputBufferId)
outputFormat = codec.getOutputFormat(); // option B
}
if (encoded > 0)
{
// Process data in encBuf
}
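When recording stops, one way to flush the encoder (a sketch, reusing the names above) is to queue an empty input buffer flagged end-of-stream and keep draining the output:
int eosId = codec.dequeueInputBuffer(1000);
if (eosId >= 0) {
codec.queueInputBuffer(eosId, 0, 0, usec, MediaCodec.BUFFER_FLAG_END_OF_STREAM);
}
// then keep calling dequeueOutputBuffer() until bufinfo.flags contains
// MediaCodec.BUFFER_FLAG_END_OF_STREAM, and finally codec.stop() and codec.release()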
Here's a pure java FLAC encoder: http://javaflacencoder.sourceforge.net
Some of the classes use the javax apis, but they can be safely deleted without affecting the main encoder classes.
Here's some sample code. The record object is of type AudioRecord.
try {
// Path to write files to
String path = Environment.getExternalStoragePublicDirectory("/test").getAbsolutePath();
String fileName = name+".flac";
String externalStorage = path;
File file = new File(externalStorage + File.separator + fileName);
// if file doesnt exists, then create it
if (!file.exists()) {
file.createNewFile();
}
short sData[] = new short[BufferElements2Rec];
FileOutputStream os = null;
try {
os = new FileOutputStream(file);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
FLACEncoder flacEncoder = new FLACEncoder();
StreamConfiguration streamConfiguration = new StreamConfiguration(1,StreamConfiguration.MIN_BLOCK_SIZE,StreamConfiguration.MAX_BLOCK_SIZE,44100,16);
FLACFileOutputStream flacOut = new FLACFileOutputStream(os);
flacEncoder.setStreamConfiguration(streamConfiguration);
flacEncoder.setOutputStream(flacOut);
flacEncoder.openFLACStream();
record.startRecording();
int totalSamples = 0;
while (isRecording) {
record.read(sData, 0, BufferElements2Rec);
totalSamples+=BufferElements2Rec;
flacEncoder.addSamples(short2int(sData),BufferElements2Rec);
flacEncoder.encodeSamples(BufferElements2Rec, false);
}
int available = flacEncoder.samplesAvailableToEncode();
while(flacEncoder.encodeSamples(available,true) < available) {
available = flacEncoder.samplesAvailableToEncode();
}
try {
flacOut.close();
} catch (IOException e) {
e.printStackTrace();
}
record.stop();
} catch(IOException ex) {
ex.printStackTrace();
}
record.release();
record = null;
}
For converting the short data into int data:
private int[] short2int(short[] sData) {
int length = sData.length;
int[] iData = new int[length];
for(int i=0;i<length;i++) {
iData[i] = sData[i];
}
return iData;
}
Based on https://github.com/nieldeokar/WhatsappAudioRecorder/blob/master/app/src/main/java/com/nieldeokar/whatsappaudiorecorder/recorder/AudioRecordThread.java
My solution for saving the recording to an .m4a file while speech recognition keeps working:
import android.media.AudioFormat;
import android.media.AudioRecord;
import android.media.MediaCodec;
import android.media.MediaCodecInfo;
import android.media.MediaFormat;
import android.media.MediaRecorder;
import android.os.Handler;
import android.os.Looper;
import java.io.File;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.OutputStream;
import java.nio.ByteBuffer;
import java.util.Collection;
import java.util.HashSet;
import timber.log.Timber;
public class SpeechRecognizer {
private static final int CHANNELS = 1;
private static final int BIT_RATE = 32000;
private static final int SAMPLE_RATE = 44100;
private static final int SAMPLE_RATE_INDEX = 4;
protected static final String TAG = SpeechRecognizer.class.getSimpleName();
public int bufferSize;
public final Collection<RecognitionListener> listeners = new HashSet();
public final Handler mainHandler = new Handler(Looper.getMainLooper());
public final Recognizer recognizer;
private Thread recognizerThread;
public final AudioRecord recorder;
private SoundAmplitudeCallback soundAmplitudeCallback;
private File recordFile = null;
private boolean isRecordingToFileEnabled = false;
private boolean isRecordingToFilePrepared = false;
private boolean isContinueRecordingToFile = false;
public interface SoundAmplitudeCallback {
void onAmplitude(int amplitude);
}
public void setSoundAmplitudeCallback(SoundAmplitudeCallback callback) {
soundAmplitudeCallback = callback;
}
public SpeechRecognizer(Mabcd model) throws IOException {
this.recognizer = new Recognizer(model, SAMPLE_RATE);
this.bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
this.recorder = createAudioRecorder(this.bufferSize);
if (this.recorder.getState() == 0) {
this.recorder.release();
throw new IOException("Failed to initialize recorder. Microphone might be already in use.");
}
}
public void addListener(RecognitionListener listener) {
synchronized (this.listeners) {
this.listeners.add(listener);
}
}
public void removeListener(RecognitionListener listener) {
synchronized (this.listeners) {
this.listeners.remove(listener);
}
}
public boolean startListening() {
if (this.recognizerThread != null) {
return false;
}
this.recognizerThread = new RecognizerThread(this);
this.recognizerThread.start();
return true;
}
public boolean startListening(int timeout) {
if (this.recognizerThread != null) {
return false;
}
this.recognizerThread = new RecognizerThread(timeout);
this.recognizerThread.start();
return true;
}
private boolean stopRecognizerThread() {
if (this.recognizerThread == null) {
return false;
}
try {
this.recognizerThread.interrupt();
this.recognizerThread.join();
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
}
this.recognizerThread = null;
return true;
}
public void startRecordToFile(File fileRecord) {
this.recordFile = fileRecord;
isRecordingToFileEnabled = true;
}
public void resumeRecordToFile(File fileRecord) {
this.recordFile = fileRecord;
isContinueRecordingToFile = true;
isRecordingToFileEnabled = true;
isRecordingToFilePrepared = false;
}
public void stopRecordToFile() {
isRecordingToFileEnabled = false;
isRecordingToFilePrepared = false;
isContinueRecordingToFile = false;
}
public boolean stop() {
boolean result = stopRecognizerThread();
if (result) {
this.mainHandler.post(new ResultEvent(this.recognizer.Rabcd(), true));
}
return result;
}
public boolean cancel() {
boolean result = stopRecognizerThread();
this.recognizer.Rabcd();
return result;
}
public void shutdown() {
this.recorder.release();
}
private final class RecognizerThread extends Thread {
private static final int NO_TIMEOUT = -1;
private int remainingSamples;
private int timeoutSamples;
VoiceRecorder voiceRecorder = null;
public RecognizerThread(int timeout) {
if (timeout != NO_TIMEOUT) {
this.timeoutSamples = (SpeechRecognizer.SAMPLE_RATE * timeout) / 1000;
} else {
this.timeoutSamples = NO_TIMEOUT;
}
this.remainingSamples = this.timeoutSamples;
}
public RecognizerThread(SpeechRecognizer speechRecognizer) {
this(NO_TIMEOUT);
}
public void run() {
voiceRecorder = new VoiceRecorder();
SpeechRecognizer.this.recorder.startRecording();
if (SpeechRecognizer.this.recorder.getRecordingState() == AudioRecord.RECORDSTATE_STOPPED) {
SpeechRecognizer.this.recorder.stop();
SpeechRecognizer.this.mainHandler.post(new OnErrorEvent(new IOException("Failed to start recording. Microphone might be already in use.")));
return;
}
byte[] buffer = new byte[SpeechRecognizer.this.bufferSize];
while (!interrupted() && (this.timeoutSamples == NO_TIMEOUT || this.remainingSamples > 0)) {
int nread = SpeechRecognizer.this.recorder.read(buffer, 0, buffer.length);
if (soundAmplitudeCallback != null) {
int max = 0;
for (short s : buffer) {
if (Math.abs(s) > max) {
max = Math.abs(s);
}
}
soundAmplitudeCallback.onAmplitude(max);
}
if (nread < 0) {
throw new RuntimeException("error reading audio buffer");
}
voiceRecorder.recording(nread, buffer);
if (SpeechRecognizer.this.recognizer.Aabcd(buffer, nread)) {
SpeechRecognizer.this.mainHandler.post(new ResultEvent(SpeechRecognizer.this.recognizer.Rabcd(), true));
} else {
SpeechRecognizer.this.mainHandler.post(new ResultEvent(SpeechRecognizer.this.recognizer.Pabcd(), false));
}
if (this.timeoutSamples != NO_TIMEOUT) {
this.remainingSamples -= nread;
}
}
voiceRecorder.shutdown();
SpeechRecognizer.this.recorder.stop();
SpeechRecognizer.this.mainHandler.removeCallbacksAndMessages((Object) null);
if (this.timeoutSamples != NO_TIMEOUT && this.remainingSamples <= 0) {
SpeechRecognizer.this.mainHandler.post(new TimeoutEvent());
}
}
}
/*
* Voice Recorder to file
* */
private class VoiceRecorder{
MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
FileOutputStream fileOutputStream = null;
MediaCodec mediaCodec = null;
void recording(int nread, byte[] buffer){
/*step 1 prepare file*/
if (isRecordingToFileEnabled && !isRecordingToFilePrepared) {
//if we continue recording not create new file
if (recordFile == null) {
throw new IllegalArgumentException("Record file is null");
}
try {
fileOutputStream = new FileOutputStream(recordFile, isContinueRecordingToFile);
} catch (FileNotFoundException e) {
throw new RuntimeException(e);
}
if (mediaCodec == null){
try {
mediaCodec = createMediaCodec(bufferSize);
mediaCodec.start();
Timber.d("mediaCodec.start()");
} catch (IOException e) {
e.printStackTrace();
}
}
isRecordingToFilePrepared = true;
}
/*prepare file*/
/*step 2 recording*/
if (isRecordingToFileEnabled && isRecordingToFilePrepared) {
try {
if (fileOutputStream != null){
boolean success = handleCodecInput(nread, buffer, mediaCodec, Thread.currentThread().isAlive());
if (success)
handleCodecOutput(mediaCodec, bufferInfo, fileOutputStream);
}
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*recording*/
/*step 3 finish recording and save to file*/
if (!isRecordingToFileEnabled && fileOutputStream != null) {
try {
VoiceRecorder.this.shutdown();
fileOutputStream.flush();
fileOutputStream.close();
fileOutputStream = null;
Timber.d("Finishing file");
} catch (IOException e) {
throw new RuntimeException(e);
}
}
/*finish recording and save to file*/
}
void shutdown(){
if (mediaCodec != null){
mediaCodec.stop();
mediaCodec.release();
mediaCodec = null;
}
}
}
private abstract class RecognitionEvent implements Runnable {
public abstract void execute(RecognitionListener recognitionListener);
private RecognitionEvent() {
}
public void run() {
for (RecognitionListener listener : (RecognitionListener[]) SpeechRecognizer.this.listeners.toArray(new RecognitionListener[0])) {
execute(listener);
}
}
}
private class ResultEvent extends RecognitionEvent {
private final boolean finalResult;
protected final String hypothesis;
ResultEvent(String hypothesis2, boolean finalResult2) {
super();
this.hypothesis = hypothesis2;
this.finalResult = finalResult2;
}
public void execute(RecognitionListener listener) {
if (this.finalResult) {
listener.onResult(this.hypothesis);
} else {
listener.onPartialResult(this.hypothesis);
}
}
}
private class OnErrorEvent extends RecognitionEvent {
private final Exception exception;
OnErrorEvent(Exception exception2) {
super();
this.exception = exception2;
}
public void execute(RecognitionListener listener) {
listener.onError(this.exception);
}
}
private class TimeoutEvent extends RecognitionEvent {
private TimeoutEvent() {
super();
}
public void execute(RecognitionListener listener) {
listener.onTimeout();
}
}
private AudioRecord createAudioRecorder(int bufferSize) {
AudioRecord recorder = new AudioRecord(MediaRecorder.AudioSource.VOICE_RECOGNITION, SAMPLE_RATE, AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, bufferSize * 10);
if (android.media.audiofx.NoiseSuppressor.isAvailable()) {
android.media.audiofx.NoiseSuppressor noiseSuppressor = android.media.audiofx.NoiseSuppressor
.create(recorder.getAudioSessionId());
if (noiseSuppressor != null) {
noiseSuppressor.setEnabled(true);
}
}
if (android.media.audiofx.AutomaticGainControl.isAvailable()) {
android.media.audiofx.AutomaticGainControl automaticGainControl = android.media.audiofx.AutomaticGainControl
.create(recorder.getAudioSessionId());
if (automaticGainControl != null) {
automaticGainControl.setEnabled(true);
}
}
return recorder;
}
private MediaCodec createMediaCodec(int bufferSize) throws IOException {
MediaCodec mediaCodec = MediaCodec.createEncoderByType("audio/mp4a-latm");
MediaFormat mediaFormat = new MediaFormat();
mediaFormat.setString(MediaFormat.KEY_MIME, "audio/mp4a-latm");
mediaFormat.setInteger(MediaFormat.KEY_SAMPLE_RATE, SAMPLE_RATE);
mediaFormat.setInteger(MediaFormat.KEY_CHANNEL_COUNT, CHANNELS);
mediaFormat.setInteger(MediaFormat.KEY_MAX_INPUT_SIZE, bufferSize);
mediaFormat.setInteger(MediaFormat.KEY_BIT_RATE, BIT_RATE);
mediaFormat.setInteger(MediaFormat.KEY_AAC_PROFILE, MediaCodecInfo.CodecProfileLevel.AACObjectLC);
try {
mediaCodec.configure(mediaFormat, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
} catch (Exception e) {
Timber.tag(TAG).w(e);
mediaCodec.release();
throw new IOException(e);
}
return mediaCodec;
}
private boolean handleCodecInput(int length,
byte[] buffer,
MediaCodec mediaCodec,
boolean running) {
if (length == AudioRecord.ERROR_BAD_VALUE ||
length == AudioRecord.ERROR_INVALID_OPERATION ||
length != bufferSize) {
if (length != bufferSize) {
Timber.tag(TAG).d( "length != BufferSize calling onRecordFailed");
// if (onRecorderFailedListener != null) {
// Log.d(TAG, "length != BufferSize calling onRecordFailed");
// onRecorderFailedListener.onRecorderFailed();
// }
return false;
}
}
int codecInputBufferIndex = mediaCodec.dequeueInputBuffer(10 * 1000);
if (codecInputBufferIndex >= 0) {
ByteBuffer codecBuffer = mediaCodec.getInputBuffer(codecInputBufferIndex);
codecBuffer.clear();
codecBuffer.put(buffer);
mediaCodec.queueInputBuffer(codecInputBufferIndex, 0, length, 0, running ? 0 : MediaCodec.BUFFER_FLAG_END_OF_STREAM);
}
return true;
}
private void handleCodecOutput(MediaCodec mediaCodec,
MediaCodec.BufferInfo bufferInfo,
OutputStream outputStream) throws IOException {
int codecOutputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
while (codecOutputBufferIndex != MediaCodec.INFO_TRY_AGAIN_LATER) {
if (codecOutputBufferIndex >= 0) {
ByteBuffer encoderOutputBuffer = mediaCodec.getOutputBuffer(codecOutputBufferIndex);
encoderOutputBuffer.position(bufferInfo.offset);
encoderOutputBuffer.limit(bufferInfo.offset + bufferInfo.size);
if ((bufferInfo.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) != MediaCodec.BUFFER_FLAG_CODEC_CONFIG) {
byte[] header = createAdtsHeader(bufferInfo.size - bufferInfo.offset);
outputStream.write(header);
byte[] data = new byte[encoderOutputBuffer.remaining()];
encoderOutputBuffer.get(data);
outputStream.write(data);
}
encoderOutputBuffer.clear();
mediaCodec.releaseOutputBuffer(codecOutputBufferIndex, false);
}
codecOutputBufferIndex = mediaCodec.dequeueOutputBuffer(bufferInfo, 0);
}
}
private byte[] createAdtsHeader(int length) {
int frameLength = length + 7;
byte[] adtsHeader = new byte[7];
adtsHeader[0] = (byte) 0xFF; // Sync Word
adtsHeader[1] = (byte) 0xF1; // MPEG-4, Layer (0), No CRC
adtsHeader[2] = (byte) ((MediaCodecInfo.CodecProfileLevel.AACObjectLC - 1) << 6);
adtsHeader[2] |= (((byte) SAMPLE_RATE_INDEX) << 2);
adtsHeader[2] |= (((byte) CHANNELS) >> 2);
adtsHeader[3] = (byte) (((CHANNELS & 3) << 6) | ((frameLength >> 11) & 0x03));
adtsHeader[4] = (byte) ((frameLength >> 3) & 0xFF);
adtsHeader[5] = (byte) (((frameLength & 0x07) << 5) | 0x1f);
adtsHeader[6] = (byte) 0xFC;
return adtsHeader;
}
}
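A usage sketch against the class above (model stands for whatever Recognizer model object your code uses; the file location is a placeholder, and the constructor throws IOException):
SpeechRecognizer sr = new SpeechRecognizer(model);
sr.startRecordToFile(new File(context.getFilesDir(), "speech.m4a"));
sr.startListening(); // recognition callbacks fire while AAC frames are appended to the file
// ... later ...
sr.stopRecordToFile();
sr.stop();
sr.shutdown();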
I have written code for recording an audio file using AudioRecord, and while writing the file to the SD card I am making two versions.
Version 1
The recorded file is saved to the SD card as it is.
Version 2
I am applying a gain feature to the recorded file and saving it to the SD card.
This works great on Sony Ericsson phones, and the audio volume is boosted to a great extent.
But I am struggling to make it work on Samsung devices.
When I play the recorded file it sounds like Talking Tom :P
Initially I thought the Samsung device did not like the combination I used to create the AudioRecorder.
So I used the following approach, in which I loop through the available configurations and use the best one to initialize the AudioRecord.
public AudioRecord findAudioRecord() {
for (int rate: mSampleRates) {
for (short audioFormat: new short[] {
AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT
}) {
for (short channelConfig: new short[] {
AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO
}) {
try {
Log.i("vipul", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
// check if we can instantiate and have a success
AudioRecord recorder = new AudioRecord(
AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);
if (recorder.getState() == AudioRecord.STATE_INITIALIZED) return recorder;
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
return null;
}
Below is the code that works well on Sony phones but struggles on Samsung devices.
public class EnvironmentRecorder extends Activity implements OnClickListener {
private static final int RECORDER_BPP = 16;
private static final String AUDIO_RECORDER_FILE_EXT_WAV = ".wav";
private static final String AUDIO_RECORDER_FOLDER = "MyRecorder";
private static final String AUDIO_RECORDER_TEMP_FILE = "record_temp.raw";
private static final int RECORDER_SAMPLERATE = 44100;
private static final int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_STEREO;
private static final int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
private Button start, stop;
private AudioRecord recorder = null;
private int bufferSize = 0;
private Thread recordingThread = null;
private boolean isRecording = false;
private static int[] mSampleRates = new int[] {
8000, 11025, 22050, 44100
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.main);
start = (Button) findViewById(R.id.start);
stop = (Button) findViewById(R.id.stop);
start.setOnClickListener(this);
stop.setOnClickListener(this);
}
@Override
public void onClick(View view) {
switch (view.getId()) {
case R.id.start:
startRecord();
break;
case R.id.stop:
stopRecording();
break;
}
}
public EnvironmentRecorder() {
try {
bufferSize = AudioRecord.getMinBufferSize(RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING);
} catch (Exception e) {
e.printStackTrace();
}
}
private String getFilename1() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
return (file.getAbsolutePath() + "/" + "NotGained" + AUDIO_RECORDER_FILE_EXT_WAV);
}
private String getFilename2() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
return (file.getAbsolutePath() + "/" + "Gained" + AUDIO_RECORDER_FILE_EXT_WAV);
}
private String getTempFilename() {
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File(filepath, AUDIO_RECORDER_FOLDER);
if (!file.exists()) {
file.mkdirs();
}
File tempFile = new File(file, AUDIO_RECORDER_TEMP_FILE); // the temp file lives inside the recorder folder, not the sdcard root
if (tempFile.exists()) tempFile.delete();
return tempFile.getAbsolutePath();
}
public AudioRecord findAudioRecord() {
for (int rate: mSampleRates) {
for (short audioFormat: new short[] {
AudioFormat.ENCODING_PCM_8BIT, AudioFormat.ENCODING_PCM_16BIT
}) {
for (short channelConfig: new short[] {
AudioFormat.CHANNEL_IN_MONO, AudioFormat.CHANNEL_IN_STEREO
}) {
try {
Log.v("vipul", "Attempting rate " + rate + "Hz, bits: " + audioFormat + ", channel: " + channelConfig);
int bufferSize = AudioRecord.getMinBufferSize(rate, channelConfig, audioFormat);
if (bufferSize != AudioRecord.ERROR_BAD_VALUE) {
// check if we can instantiate and have a success
AudioRecord recorder = new AudioRecord(
AudioSource.DEFAULT, rate, channelConfig, audioFormat, bufferSize);
if (recorder.getState() == AudioRecord.STATE_INITIALIZED) return recorder;
recorder.release(); // free the native resources of instances that failed to initialize
}
} catch (Exception e) {
e.printStackTrace();
}
}
}
}
return null;
}
public void startRecord() {
/*
* recorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
* RECORDER_SAMPLERATE, RECORDER_CHANNELS, RECORDER_AUDIO_ENCODING,
* bufferSize);
*/
recorder = findAudioRecord();
if (recorder == null) {
Log.e("vipul", "No supported AudioRecord configuration found on this device");
return;
}
recorder.startRecording();
isRecording = true;
recordingThread = new Thread(new Runnable() {
@Override
public void run() {
writeAudioDataToFile();
}
}, "AudioRecorder Thread");
recordingThread.start();
}
private void writeAudioDataToFile() {
byte data[] = new byte[bufferSize];
String filename = getTempFilename();
FileOutputStream os = null;
try {
os = new FileOutputStream(filename);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
int read = 0;
if (null != os) {
while (isRecording) {
read = recorder.read(data, 0, bufferSize);
if (AudioRecord.ERROR_INVALID_OPERATION != read && read > 0) {
try {
os.write(data, 0, read); // write only the bytes actually read, not the whole buffer
} catch (IOException e) {
e.printStackTrace();
}
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
public void stopRecording() {
if (null != recorder) {
isRecording = false;
recorder.stop();
recorder.release();
recorder = null;
recordingThread = null;
copyWaveFile(getTempFilename(), getFilename1(), getFilename2());
deleteTempFile();
}
}
private void deleteTempFile() {
File file = new File(getTempFilename());
file.delete();
}
private void copyWaveFile(String inFilename, String outFileName1, String outFileName2) {
FileInputStream in = null;
FileOutputStream out1 = null, out2 = null;
long totalAudioLen = 0;
long totalDataLen = totalAudioLen + 36;
long longSampleRate = RECORDER_SAMPLERATE;
int channels = 2;
long byteRate = RECORDER_BPP * RECORDER_SAMPLERATE * channels / 8;
byte[] data = new byte[bufferSize];
try {
in = new FileInputStream(inFilename);
out1 = new FileOutputStream(outFileName1);
out2 = new FileOutputStream(outFileName2);
totalAudioLen = in.getChannel().size();
totalDataLen = totalAudioLen + 36;
WriteWaveFileHeader(out1, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
WriteWaveFileHeader(out2, totalAudioLen, totalDataLen, longSampleRate, channels, byteRate);
int read;
while ((read = in.read(data)) != -1) {
out1.write(data, 0, read); // Writing Non-Gained Data
float rGain = 2.5f;
for (int i = 0; i < read / 2; i++) {
short curSample = getShort(data[i * 2], data[i * 2 + 1]);
if (rGain != 1) {
// apply gain and clamp; without the clamp, loud samples overflow and play back as noise
int gained = (int) (curSample * rGain);
if (gained > Short.MAX_VALUE) gained = Short.MAX_VALUE;
else if (gained < Short.MIN_VALUE) gained = Short.MIN_VALUE;
// convert the gained sample back to little-endian byte data
byte[] a = getByteFromShort((short) gained);
// modify buffer to contain the gained sample
data[i * 2] = a[0];
data[i * 2 + 1] = a[1];
}
}
out2.write(data, 0, read); // Writing Gained Data
}
out1.close();
out2.close();
in.close();
Toast.makeText(this, "Done!!", Toast.LENGTH_LONG).show();
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
}
}
private short getShort(byte argB1, byte argB2) {
return (short)((argB1 & 0xff) | (argB2 << 8));
}
private byte[] getByteFromShort(short x) {
// variant 1 - noise
byte[] a = new byte[2];
a[0] = (byte)(x & 0xff);
a[1] = (byte)((x >> 8) & 0xff);
// variant 2 - noise and almost broke my ears - very loud
// ByteBuffer buffer = ByteBuffer.allocate(2);
// buffer.putShort(x);
// buffer.flip();
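// Note: variant 2 is so loud and noisy because ByteBuffer defaults to
// big-endian byte order, so putShort() emits every sample byte-swapped
// relative to the little-endian layout WAV PCM expects. Setting the order
// first would make it equivalent to variant 1:
// ByteBuffer buffer = ByteBuffer.allocate(2).order(ByteOrder.LITTLE_ENDIAN);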
return a;
}
private void WriteWaveFileHeader(FileOutputStream out, long totalAudioLen, long totalDataLen, long longSampleRate, int channels, long byteRate)
throws IOException {
byte[] header = new byte[44];
header[0] = 'R';
header[1] = 'I';
header[2] = 'F';
header[3] = 'F';
header[4] = (byte)(totalDataLen & 0xff);
header[5] = (byte)((totalDataLen >> 8) & 0xff);
header[6] = (byte)((totalDataLen >> 16) & 0xff);
header[7] = (byte)((totalDataLen >> 24) & 0xff);
header[8] = 'W';
header[9] = 'A';
header[10] = 'V';
header[11] = 'E';
header[12] = 'f';
header[13] = 'm';
header[14] = 't';
header[15] = ' ';
header[16] = 16;
header[17] = 0;
header[18] = 0;
header[19] = 0;
header[20] = 1;
header[21] = 0;
header[22] = (byte) channels;
header[23] = 0;
header[24] = (byte)(longSampleRate & 0xff);
header[25] = (byte)((longSampleRate >> 8) & 0xff);
header[26] = (byte)((longSampleRate >> 16) & 0xff);
header[27] = (byte)((longSampleRate >> 24) & 0xff);
header[28] = (byte)(byteRate & 0xff);
header[29] = (byte)((byteRate >> 8) & 0xff);
header[30] = (byte)((byteRate >> 16) & 0xff);
header[31] = (byte)((byteRate >> 24) & 0xff);
header[32] = (byte)(2 * 16 / 8);
header[33] = 0;
header[34] = RECORDER_BPP;
header[35] = 0;
header[36] = 'd';
header[37] = 'a';
header[38] = 't';
header[39] = 'a';
header[40] = (byte)(totalAudioLen & 0xff);
header[41] = (byte)((totalAudioLen >> 8) & 0xff);
header[42] = (byte)((totalAudioLen >> 16) & 0xff);
header[43] = (byte)((totalAudioLen >> 24) & 0xff);
out.write(header, 0, 44);
}
}
I would like to know if I need to add any extra lines of code to make my AudioRecord work reliably on Samsung devices.
We're also struggling with audio recording on some Samsung Android devices. Unfortunately it seems to be very broken: even different revisions of the same phone model behave differently with the same codebase.
Here are my current findings; I hope you find something useful in them:
1. Broken Initialization:
Unfortunately, the strategy you are using to query for valid recording configurations will fail, at least on Samsung Galaxy Young and Ace models running Android 2.3.
The problem is that some invalid AudioRecord configurations, instead of simply failing, will completely brick the audio capture subsystem if tried. You'll need to reset the phone to recover from this state.
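One defensive measure that has worked reasonably well for us (a sketch, not a guaranteed cure) is to consult a small hand-maintained whitelist of known-good configurations before resorting to brute-force probing; android.os.Build provides the identifiers. The model strings and rates below are placeholders for whatever your own testing establishes:

// Sketch: prefer a per-model whitelist over blind probing on devices
// where an invalid configuration can wedge the capture subsystem.
static int preferredSampleRate() {
    String model = android.os.Build.MODEL.toLowerCase(java.util.Locale.US);
    if (model.contains("gt-s5360") || model.contains("gt-s5830")) {
        // hypothetical entries for Galaxy Young / Galaxy Ace
        return 8000;
    }
    return 44100; // the rate the documentation says must be supported
}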
2. Inconsistent sampling-rate support across revisions of the same phone model
On an older Galaxy Ace phone, recording at 11,025 Hz, 16-bit mono will succeed. On newer Ace revisions, this AudioRecord configuration will be accepted as valid, but the resulting recording will be distorted, with a "chipmunk" effect. A very popular guitar tuner app that has hardcoded this sampling rate fails to give proper tuning readings on these phones precisely because of this problem!
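A rough way to detect this mismatch at runtime (just a diagnostic sketch, assuming blocking reads) is to time how long capturing one nominal second of frames actually takes; if the hardware delivers data at a different real rate than requested, the measured frames-per-second diverges clearly from the requested rate:

// Diagnostic sketch: estimate the effective capture rate by timing reads.
static double measureEffectiveRate(AudioRecord rec, int requestedRate) {
    short[] buf = new short[requestedRate]; // ~1 second of 16-bit mono frames
    rec.startRecording();
    long t0 = System.nanoTime();
    int frames = 0;
    while (frames < buf.length) {
        int n = rec.read(buf, frames, buf.length - frames);
        if (n <= 0) break;
        frames += n;
    }
    rec.stop();
    double seconds = (System.nanoTime() - t0) / 1e9;
    return frames / seconds; // compare against requestedRate
}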
3. Extremely low volume audio capture on some configurations.
In the Galaxy Young and Galaxy Ace, recording from the mic or default audio source at 44,100 Hz (the supposedly canonical rate where everything should work fine) produces an undistorted but extremely low-volume recording. I haven't yet found a way to fix this other than software amplification (which is the equivalent of magnifying a very low-res image, with the consequent "jaggedness" of the result).
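The amplification itself is simple enough; a minimal sketch for 16-bit samples (the clamping matters, otherwise loud passages wrap around into harsh distortion):

// Multiply each 16-bit sample by 'gain', clamping to the short range.
static void applyGain(short[] samples, int count, float gain) {
    for (int i = 0; i < count; i++) {
        int v = (int) (samples[i] * gain);
        if (v > Short.MAX_VALUE) v = Short.MAX_VALUE;
        else if (v < Short.MIN_VALUE) v = Short.MIN_VALUE;
        samples[i] = (short) v;
    }
}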
4. Failure to support the canonical 44,100Hz sampling rate on every audio capture source.
In the Galaxy Young and Galaxy Ace, recording from the Camcorder source fails at 44,100 Hz (again, the configuration will be accepted as valid), producing complete garbage. However, recording at 8,000 Hz, 16,000 Hz and 48,000 Hz works fine and produces a recording with very acceptable volume levels. What is frustrating is that, according to the Android documentation, 44,100 Hz is a sampling rate all devices SHOULD support.
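Because of this, probing for a working rate per audio source, rather than once globally, seems necessary (and only on devices where probing is safe, given point 1 above). A sketch using the rates we found reliable for the Camcorder source; the preference order reflects our findings, not any platform guarantee:

// Sketch: find a working rate for one specific source,
// e.g. MediaRecorder.AudioSource.CAMCORDER.
static AudioRecord openSource(int source) {
    for (int rate : new int[] { 48000, 16000, 8000 }) {
        int size = AudioRecord.getMinBufferSize(rate,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
        if (size <= 0) continue;
        AudioRecord r = new AudioRecord(source, rate,
                AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT, size);
        if (r.getState() == AudioRecord.STATE_INITIALIZED) return r;
        r.release();
    }
    return null;
}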
5. OpenSL does not fix any of the problems reported.
Working with the NDK and OpenSL produces the same results described above. It seems that the AudioRecord class is simply wrapping calls to OpenSL, and the problem is either hardware-based or buried at a lower-level tier in the kernel code.
This situation is very unfortunate indeed, as these models are becoming very popular - at least in Mexico.
Good luck - and please report if you had better luck working with these phones.
=)
Audio gain control: to increase the amplitude of the audio you need to calculate a gain factor and multiply every captured sample by it. The following code does that (P.S. ignore the unrelated code):
public class MainActivity extends Activity {
public static final int SAMPLE_RATE = 16000;
private AudioRecord mRecorder;
private File mRecording;
private short[] mBuffer;
private final String startRecordingLabel = "Start recording";
private final String stopRecordingLabel = "Stop recording";
private boolean mIsRecording = false;
private ProgressBar mProgressBar;
float iGain = 1.0f;
CheckBox gain;
protected int bitsPerSamples = 16;
@Override
public void onCreate(final Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.fragment_main);
initRecorder();
Button bluetooth = (Button)findViewById(R.id.blue);
gain = (CheckBox) findViewById(R.id.checkBox1);
mProgressBar = (ProgressBar) findViewById(R.id.progressBar);
final Button button = (Button) findViewById(R.id.start);
button.setText(startRecordingLabel);
bluetooth.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(View v) {
Intent i = new Intent("");
}
});
gain.setOnCheckedChangeListener(new CompoundButton.OnCheckedChangeListener() {
@Override
public void onCheckedChanged(CompoundButton buttonView,
boolean isChecked) {
if (gain.isChecked()) {
iGain = 5.0f;
} else {
iGain = 2.0f;
}
}
});
button.setOnClickListener(new View.OnClickListener() {
@Override
public void onClick(final View v) {
if (!mIsRecording) {
button.setText(stopRecordingLabel);
mIsRecording = true;
mRecorder.startRecording();
mRecording = getFile("raw");
startBufferedWrite(mRecording);
} else {
button.setText(startRecordingLabel);
mIsRecording = false;
mRecorder.stop();
File waveFile = getFile("wav");
try {
rawToWave(mRecording, waveFile);
} catch (IOException e) {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
}
Toast.makeText(MainActivity.this,
"Recorded to " + waveFile.getName(),
Toast.LENGTH_SHORT).show();
}
}
});
}
@Override
public void onDestroy() {
mRecorder.release();
super.onDestroy();
}
private void initRecorder() {
int bufferSize = AudioRecord.getMinBufferSize(SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT);
mBuffer = new short[bufferSize];
mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC, SAMPLE_RATE,
AudioFormat.CHANNEL_IN_MONO, AudioFormat.ENCODING_PCM_16BIT,
bufferSize);
}
private void startBufferedWrite(final File file) {
new Thread(new Runnable() {
@Override
public void run() {
DataOutputStream output = null;
try {
output = new DataOutputStream(new BufferedOutputStream(
new FileOutputStream(file)));
while (mIsRecording) {
double sum = 0;
int readSize = mRecorder.read(mBuffer, 0,
mBuffer.length);
for (int frameIndex = 0; frameIndex < readSize; frameIndex++) {
// Apply the gain and clamp to the 16-bit range; without the
// clamp, loud samples wrap around and play back as harsh noise.
// (The original byte-accumulator version indexed the short[]
// buffer as if it held bytes, which corrupted the samples.)
if (iGain != 1) {
int gained = (int) (mBuffer[frameIndex] * iGain);
if (gained > Short.MAX_VALUE) gained = Short.MAX_VALUE;
else if (gained < Short.MIN_VALUE) gained = Short.MIN_VALUE;
mBuffer[frameIndex] = (short) gained;
}
output.writeShort(mBuffer[frameIndex]);
sum += mBuffer[frameIndex] * mBuffer[frameIndex];
}
if (readSize > 0) {
final double amplitude = sum / readSize;
mProgressBar.setProgress((int) Math.sqrt(amplitude));
}
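// Note: the progress bar shows the RMS level of each buffer,
// rms = sqrt((1/N) * sum(sample_i^2)); roughly 0 for silence
// and up to ~32767 for a full-scale signal.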
}
} catch (final IOException e) {
// Toast must be shown on the UI thread; this writer thread has no Looper
runOnUiThread(new Runnable() {
@Override
public void run() {
Toast.makeText(MainActivity.this, e.getMessage(),
Toast.LENGTH_SHORT).show();
}
});
} finally {
mProgressBar.setProgress(0);
if (output != null) {
try {
output.flush();
} catch (IOException e) {
e.printStackTrace();
} finally {
try {
output.close();
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
}
}).start();
}
private void rawToWave(final File rawFile, final File waveFile)
throws IOException {
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.readFully(rawData); // read() may return fewer bytes than the file length
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, SAMPLE_RATE); // sample rate
writeInt(output, SAMPLE_RATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN)
.asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
// Apply Gain
/*
* s *= iGain; if(s>32767) { s=32767; } else if(s<-32768) {
* s=-32768; }
*/
bytes.putShort(s);
}
output.write(bytes.array());
} finally {
if (output != null) {
output.close();
}
}
}
private File getFile(final String suffix) {
Time time = new Time();
time.setToNow();
return new File(Environment.getExternalStorageDirectory(),
time.format("%Y%m%d%H%M%S") + "." + suffix);
}
private void writeInt(final DataOutputStream output, final int value)
throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value)
throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
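// Note: writeInt/writeShort above deliberately emit little-endian bytes,
// as the WAV format requires; DataOutputStream's built-in writeInt and
// writeShort are big-endian, which is also why rawToWave() has to do the
// byte-order round-trip on the recorded samples.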
private void writeString(final DataOutputStream output, final String value)
throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
}
I have a frequency-indicator sound recorder which records sound successfully, but it records for only 4 seconds. I need to record for more than 4 seconds. The code is given below:
public class vr1 extends Activity
{
private static final int SAMPLE_RATE_IN_HZ = 8000;
private static final int RECORDER_BPP = 16;
private static final int CHANNEL_CONFIG = AudioFormat.CHANNEL_IN_MONO;
private static final int AUDIO_FORMAT = AudioFormat.ENCODING_PCM_16BIT;
private static final int AUDIO_SOURCE = MediaRecorder.AudioSource.MIC;
private static final String TAG = "VoiceDetection";
private static final int MAX_VOL = 600;
//private static final int MAX_VOL = 15000;
private static final int MIN_VAL = 0;
private static final int START_RECORD_FROM = 350;
private static final int CHECK_BLOCK_COUNT = 3;
private Thread onCreateThread = null;
private AudioRecord audioRecorder = null;
private int minBufferSizeInBytes = 0;
/** Called when the activity is first created. */
@Override
public void onCreate(Bundle savedInstanceState)
{
super.onCreate(savedInstanceState);
setContentView(R.layout.porec);
TextView textView = (TextView)findViewById(R.id.textView3);
Button button = (Button)findViewById(R.id.button1);
button.setVisibility( android.view.View.INVISIBLE );
textView.setVisibility( android.view.View.VISIBLE );
Start();
}
public void Start( )
{
// Create Handler
final Handler handler = new Handler() {
@Override
public void handleMessage(Message msg)
{
int value = msg.what;
ImageView picture = (ImageView) findViewById(R.id.imageView);
int resID[] = {
R.drawable.image0001, R.drawable.image20, R.drawable.image40,
R.drawable.image60, R.drawable.image80, R.drawable.image100
};
for( int i=MIN_VAL, step = (MAX_VOL - MIN_VAL)/resID.length; i<resID.length; i++ ) {
if( value >= i*step && value <= i*step + step )
picture.setImageResource( resID[i] );
}
// Set image for maximum value.
if( value >= MAX_VOL )
picture.setImageResource( R.drawable.image100 );
}
};
// Text change handler
final Handler changeTexthandler = new Handler() {
@Override
public void handleMessage(Message msg)
{
TextView textView = (TextView)findViewById(R.id.textView3);
Button button = (Button)findViewById(R.id.button1);
switch( msg.what )
{
case 0:
textView.setText("Waiting");
button.setVisibility( android.view.View.INVISIBLE );
textView.setVisibility( android.view.View.VISIBLE );
break;
case 1:
textView.setText("Recording");
button.setVisibility( android.view.View.INVISIBLE );
textView.setVisibility( android.view.View.VISIBLE );
break;
default:
button.setVisibility( android.view.View.VISIBLE );
textView.setVisibility( android.view.View.INVISIBLE );
break;
}
}
};
// Initialize minimum buffer size in bytes.
minBufferSizeInBytes = AudioRecord.getMinBufferSize( SAMPLE_RATE_IN_HZ,
CHANNEL_CONFIG,
AUDIO_FORMAT
);
if( minBufferSizeInBytes == AudioRecord.ERROR_BAD_VALUE )
Log.e( TAG, "Bad Value for \"minBufferSize\", recording parameters are not supported by the hardware" );
if( minBufferSizeInBytes == AudioRecord.ERROR )
Log.e( TAG, "Bad Value for \"minBufferSize\", implementation was unable to query the hardware for its output properties" );
// Initialize Audio Recorder.
try {
audioRecorder = new AudioRecord( AUDIO_SOURCE,
SAMPLE_RATE_IN_HZ,
CHANNEL_CONFIG,
AUDIO_FORMAT,
minBufferSizeInBytes
);
}
catch(IllegalArgumentException ex) {
Log.e( TAG, "Illegal Arguments: " + ex.getMessage() );
}
// Launch Thread.
onCreateThread = new Thread( new Runnable() {
@Override
public void run()
{
// Starts recording from the AudioRecord instance.
audioRecorder.startRecording();
int numberOfBytesRead = 0;
byte audioBuffer[] = new byte[minBufferSizeInBytes];
float tempBuffer[] = new float[CHECK_BLOCK_COUNT];
int tempIndex = 0;
boolean isRecording = false;
int totalReadBytes = 0;
byte totalByteBuffer[] = new byte[60 * 44100 * 2]; // enough for 60 s at 44.1 kHz, 16-bit (generous, since capture here is 8 kHz mono)
// While data coming from microphone.
while( true )
{
float totalAbsValue = 0.0f;
short sample = 0;
numberOfBytesRead = audioRecorder.read( audioBuffer, 0, minBufferSizeInBytes );
// Analyze incoming sound.
for( int i=0; i<numberOfBytesRead; i+=2 ) {
sample = (short)( (audioBuffer[i] & 0xff) | (audioBuffer[i + 1] << 8) ); // mask the low byte to avoid sign extension
totalAbsValue += Math.abs( sample ) / (float)(numberOfBytesRead/2);
}
}
// Set Animation of microphone.
handler.sendEmptyMessage((int)totalAbsValue);
// Analyze tempBuffer.
tempBuffer[tempIndex%CHECK_BLOCK_COUNT] = totalAbsValue;
float tempBufferTotalCount = 0.0f;
for( int i=0; i<CHECK_BLOCK_COUNT; ++i )
tempBufferTotalCount += tempBuffer[i];
// Finalize value.
tempBufferTotalCount = tempBufferTotalCount/CHECK_BLOCK_COUNT;
// Waiting for loud speech to start recording.
if( (tempBufferTotalCount >=0 && tempBufferTotalCount <= START_RECORD_FROM) && !isRecording )
{
Log.i("TAG", "Waiting for voice to start record.");
tempIndex++;
changeTexthandler.sendEmptyMessage(0);
continue;
}
if( tempBufferTotalCount > START_RECORD_FROM && !isRecording )
{
Log.i("TAG", "Recording");
changeTexthandler.sendEmptyMessage(1);
isRecording = true;
}
// Stop Recording and save data to file.
if( (tempBufferTotalCount >= 0 && tempBufferTotalCount <= START_RECORD_FROM) && isRecording )
{
Log.i("TAG", "Stop Recording and Save data to file");
changeTexthandler.sendEmptyMessage(2);
audioRecorder.stop();
audioRecorder.release();
audioRecorder = null;
SaveDataToFile( totalReadBytes, totalByteBuffer );
totalReadBytes = 0;
tempIndex++;
break;
}
// Record Sound.
for( int i=0; i<numberOfBytesRead; i++ )
totalByteBuffer[totalReadBytes + i] = audioBuffer[i];
totalReadBytes += numberOfBytesRead;
tempIndex++;
}
}
}, "Voice Detection Thread" );
// Run the Thread.
onCreateThread.start();
}
public void SaveDataToFile( int totalReadBytes, byte[] totalByteBuffer )
{
// Save audio to file.
String filepath = Environment.getExternalStorageDirectory().getPath();
File file = new File( filepath, "VoiceDetectionDemo" );
if( !file.exists( ) )
file.mkdirs();
// String fileName = file.getAbsolutePath() + "/" + System.currentTimeMillis() + ".wav";
// The file is always saved under the same name.
String fileName = file.getAbsolutePath() + "/" + "VoiceDetectionDemo" + ".wav";
long totalAudioLen = 0;
long totalDataLen = totalAudioLen + 36;
long longSampleRate = SAMPLE_RATE_IN_HZ;
int channels = 1;
long byteRate = RECORDER_BPP * SAMPLE_RATE_IN_HZ * channels/8;
totalAudioLen = totalReadBytes;
totalDataLen = totalAudioLen + 36;
byte finalBuffer[] = new byte[totalReadBytes + 44];
finalBuffer[0] = 'R'; // RIFF/WAVE header
finalBuffer[1] = 'I';
finalBuffer[2] = 'F';
finalBuffer[3] = 'F';
finalBuffer[4] = (byte) (totalDataLen & 0xff);
finalBuffer[5] = (byte) ((totalDataLen >> 8) & 0xff);
finalBuffer[6] = (byte) ((totalDataLen >> 16) & 0xff);
finalBuffer[7] = (byte) ((totalDataLen >> 24) & 0xff);
finalBuffer[8] = 'W';
finalBuffer[9] = 'A';
finalBuffer[10] = 'V';
finalBuffer[11] = 'E';
finalBuffer[12] = 'f'; // 'fmt ' chunk
finalBuffer[13] = 'm';
finalBuffer[14] = 't';
finalBuffer[15] = ' ';
finalBuffer[16] = 16; // 4 bytes: size of 'fmt ' chunk
finalBuffer[17] = 0;
finalBuffer[18] = 0;
finalBuffer[19] = 0;
finalBuffer[20] = 1; // format = 1
finalBuffer[21] = 0;
finalBuffer[22] = (byte) channels;
finalBuffer[23] = 0;
finalBuffer[24] = (byte) (longSampleRate & 0xff);
finalBuffer[25] = (byte) ((longSampleRate >> 8) & 0xff);
finalBuffer[26] = (byte) ((longSampleRate >> 16) & 0xff);
finalBuffer[27] = (byte) ((longSampleRate >> 24) & 0xff);
finalBuffer[28] = (byte) (byteRate & 0xff);
finalBuffer[29] = (byte) ((byteRate >> 8) & 0xff);
finalBuffer[30] = (byte) ((byteRate >> 16) & 0xff);
finalBuffer[31] = (byte) ((byteRate >> 24) & 0xff);
finalBuffer[32] = (byte) (channels * 16 / 8); // block align = channels * bitsPerSample / 8 (was hardcoded for stereo)
finalBuffer[33] = 0;
finalBuffer[34] = RECORDER_BPP; // bits per sample
finalBuffer[35] = 0;
finalBuffer[36] = 'd';
finalBuffer[37] = 'a';
finalBuffer[38] = 't';
finalBuffer[39] = 'a';
finalBuffer[40] = (byte) (totalAudioLen & 0xff);
finalBuffer[41] = (byte) ((totalAudioLen >> 8) & 0xff);
finalBuffer[42] = (byte) ((totalAudioLen >> 16) & 0xff);
finalBuffer[43] = (byte) ((totalAudioLen >> 24) & 0xff);
for( int i=0; i<totalReadBytes; ++i )
finalBuffer[44+i] = totalByteBuffer[i];
FileOutputStream out;
try {
out = new FileOutputStream(fileName);
try {
out.write(finalBuffer);
out.close();
} catch (IOException e) {
e.printStackTrace();
}
} catch (FileNotFoundException e1) {
e1.printStackTrace();
}
}
public void OnClickButtonTryAgain(View view)
{
Start();
}
}