Using below code I want to convert .pcm file to .wav file in android application.
public class WavConvertor {
public void rawToWave(final File rawFile, final File waveFile) throws IOException {
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.readFully(rawData);
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, MyApplicationVar.getInstance().SAMPLING_RATE); // sample rate
writeInt(output, MyApplicationVar.getInstance().SAMPLING_RATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
bytes.putShort(s);
}
output.write(fullyReadFileToBytes(rawFile));
} finally {
if (output != null) {
output.close();
}
}
}
byte[] fullyReadFileToBytes(File f) throws IOException {
int size = (int) f.length();
byte bytes[] = new byte[size];
byte tmpBuff[] = new byte[size];
FileInputStream fis= new FileInputStream(f);
try {
int read = fis.read(bytes, 0, size);
if (read < size) {
int remain = size - read;
while (remain > 0) {
read = fis.read(tmpBuff, 0, remain);
System.arraycopy(tmpBuff, 0, bytes, size - remain, read);
remain -= read;
}
}
} catch (IOException e){
throw e;
} finally {
fis.close();
}
return bytes;
}
private void writeInt(final DataOutputStream output, final int value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value) throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
}
for small files it is ok but for large files when convertin I get this error . I searched and used some solutions but they didn't work
java.lang.OutOfMemoryError: Failed to allocate a 160369676 byte allocation with 16777216 free bytes and 46MB until OOM
Is there any way to read the pcm file part by part and write them?
BTW
I had set
android:largeHeap="true"
android:hardwareAccelerated="false"
in manifest but didn't work
Related
i get data from a wav file and want to write byte back to wav .
i have already got the byte from wav.here is my code
i know 0-43 bytes are header and 44- are data
byte[] soundBytes;
try {
InputStream inputStream = new FileInputStream(getRealPathFromURI(this,uri));
soundBytes = toByteArray(inputStream);
for (int i = 0; i != -1 ; i++) {
if(soundBytes[i]<0) {
k =soundBytes[i] + 256;
} else {k = soundBytes[i];}
System.out.println("byte " + i + ": " + k );
}
} catch(Exception e) {
e.printStackTrace();
}
}
}
public byte[] toByteArray(InputStream in) throws IOException {
ByteArrayOutputStream out = new ByteArrayOutputStream();
int read = 0;
byte[] buffer = new byte[1024];
while (read != -1) {
read = in.read(buffer);
if (read != -1)
out.write(buffer,0,read);
}
out.close();
return out.toByteArray();
}
The below code is converting raw pcm file into raw file. As you said you got the raw bytes you either follow creating a raw file with these bytes you got and send that file into following rawToWave() method or manipulate rawToWave() method to make require convert bytes[] into raw file directly.
public void rawToWave(final File rawFile) throws IOException {
File waveFile = DirectoryOperations.createDirAndAudioFile("vocal.wav");// creating the empty wav file.
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.read(rawData);
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;//following block is converting raw to wav.
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, RECORDER_SAMPLERATE); // sample rate
writeInt(output, RECORDER_SAMPLERATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
output.write(fullyReadFileToBytes(rawFile));
} finally {
if (output != null) {
output.close();
}
}
}
private byte[] fullyReadFileToBytes(File f) throws IOException {
int size = (int) f.length();
byte bytes[] = new byte[size];
byte tmpBuff[] = new byte[size];
try (FileInputStream fis = new FileInputStream(f)) {
int read = fis.read(bytes, 0, size);
if (read < size) {
int remain = size - read;
while (remain > 0) {
read = fis.read(tmpBuff, 0, remain);
System.arraycopy(tmpBuff, 0, bytes, size - remain, read);
remain -= read;
}
}
}
return bytes;
}
private void writeInt(final DataOutputStream output, final int value) throws IOException {
output.write(value);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value) throws IOException {
output.write(value);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value) throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
I am currently developing an Android Application that has audio recording and playing. I am new to dealing with audio and I'm having some trouble with encoding and formats.
I am able to record and play the audio in my application, but when exporting I am not able to reproduce the audio. The only way I found was exporting my .pcm file and converting using Audacity.
This is my code to record the audio is:
private Thread recordingThread
private AudioRecord mRecorder;
private boolean isRecording = false;
private void startRecording() {
mRecorder = new AudioRecord(MediaRecorder.AudioSource.MIC,
Constants.RECORDER_SAMPLERATE, Constants.RECORDER_CHANNELS,
Constants.RECORDER_AUDIO_ENCODING, Constants.BufferElements2Rec * Constants.BytesPerElement);
mRecorder.startRecording();
isRecording = true;
recordingThread = new Thread(new Runnable() {
public void run() {
writeAudioDataToFile();
}
}, "AudioRecorder Thread");
recordingThread.start();
}
private void writeAudioDataToFile() {
// Write the output audio in byte
FileOutputStream os = null;
try {
os = new FileOutputStream(mFileName);
} catch (FileNotFoundException e) {
e.printStackTrace();
}
while (isRecording) {
// gets the voice output from microphone to byte format
mRecorder.read(sData, 0, Constants.BufferElements2Rec);
try {
// // writes the data to file from buffer
// // stores the voice buffer
byte bData[] = short2byte(sData);
os.write(bData, 0, Constants.BufferElements2Rec * Constants.BytesPerElement);
} catch (IOException e) {
e.printStackTrace();
}
}
try {
os.close();
} catch (IOException e) {
e.printStackTrace();
}
}
To play the recorded audio, the code is:
private void startPlaying() {
new Thread(new Runnable() {
public void run() {
try {
File file = new File(mFileName);
byte[] audioData = null;
InputStream inputStream = new FileInputStream(mFileName);
audioData = new byte[Constants.BufferElements2Rec];
mPlayer = new AudioTrack(AudioManager.STREAM_MUSIC, Constants.RECORDER_SAMPLERATE,
AudioFormat.CHANNEL_OUT_MONO, Constants.RECORDER_AUDIO_ENCODING,
Constants.BufferElements2Rec * Constants.BytesPerElement, AudioTrack.MODE_STREAM);
final float duration = (float) file.length() / Constants.RECORDER_SAMPLERATE / 2;
Log.i(TAG, "PLAYBACK AUDIO");
Log.i(TAG, String.valueOf(duration));
mPlayer.setPositionNotificationPeriod(Constants.RECORDER_SAMPLERATE / 10);
mPlayer.setNotificationMarkerPosition(Math.round(duration * Constants.RECORDER_SAMPLERATE));
mPlayer.play();
int i = 0;
while ((i = inputStream.read(audioData)) != -1) {
try {
mPlayer.write(audioData, 0, i);
} catch (Exception e) {
Log.e(TAG, "Exception: " + e.getLocalizedMessage());
}
}
} catch (FileNotFoundException fe) {
Log.e(TAG, "File not found: " + fe.getLocalizedMessage());
} catch (IOException io) {
Log.e(TAG, "IO Exception: " + io.getLocalizedMessage());
}
}
}).start();
}
The constants defined in a Constants class are:
public class Constants {
final static public int RECORDER_SAMPLERATE = 44100;
final static public int RECORDER_CHANNELS = AudioFormat.CHANNEL_IN_MONO;
final static public int RECORDER_AUDIO_ENCODING = AudioFormat.ENCODING_PCM_16BIT;
final static public int BufferElements2Rec = 1024; // want to play 2048 (2K) since 2 bytes we use only 1024
final static public int BytesPerElement = 2; // 2 bytes in 16bit format
}
If I export the file as it is, I convert it with Audacity and it plays. I do, however, need to export it in a format that can be played automatically.
I've seen answers to implement Lame and am currently working on it. I've also found an answer to convert it using:
private File rawToWave(final File rawFile, final String filePath) throws IOException {
File waveFile = new File(filePath);
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.read(rawData);
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, Constants.RECORDER_SAMPLERATE); // sample rate
writeInt(output, Constants.RECORDER_SAMPLERATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
bytes.putShort(s);
}
output.write(bytes.array());
} finally {
if (output != null) {
output.close();
}
}
return waveFile;
}
private void writeInt(final DataOutputStream output, final int value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value) throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
But this, when exported, plays with the correct duration but just white noise.
Some of the answers that I've tried but wasn't able to work:
Android:Creating Wave file using Raw PCM, the wave file does not play
How to convert PCM raw data to mp3 file?
converting pcm file to mp3 using liblame in android
Anyone can point out what is the best solution? Is it really implementing lame or can it be done on a more straight forward way? If so, why is the code sample converting the file to just white noise?
You've got most of the code correct. The only issue that I can see is the part where you write the PCM data to the WAV file. This should be quite simple to do because WAV = Metadata + PCM (in that order). This should work:
private void rawToWave(final File rawFile, final File waveFile) throws IOException {
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.read(rawData);
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
// see http://ccrma.stanford.edu/courses/422/projects/WaveFormat/
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, 44100); // sample rate
writeInt(output, RECORDER_SAMPLERATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
bytes.putShort(s);
}
output.write(fullyReadFileToBytes(rawFile));
} finally {
if (output != null) {
output.close();
}
}
}
byte[] fullyReadFileToBytes(File f) throws IOException {
int size = (int) f.length();
byte bytes[] = new byte[size];
byte tmpBuff[] = new byte[size];
FileInputStream fis= new FileInputStream(f);
try {
int read = fis.read(bytes, 0, size);
if (read < size) {
int remain = size - read;
while (remain > 0) {
read = fis.read(tmpBuff, 0, remain);
System.arraycopy(tmpBuff, 0, bytes, size - remain, read);
remain -= read;
}
}
} catch (IOException e){
throw e;
} finally {
fis.close();
}
return bytes;
}
private void writeInt(final DataOutputStream output, final int value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value) throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
How to use
It's quite simple to use. Just call it like this:
File f1 = new File("/sdcard/44100Sampling-16bit-mono-mic.pcm"); // The location of your PCM file
File f2 = new File("/sdcard/44100Sampling-16bit-mono-mic.wav"); // The location where you want your WAV file
try {
rawToWave(f1, f2);
} catch (IOException e) {
e.printStackTrace();
}
How all this works
As you can see, the WAV header is the only difference between WAV and PCM file formats. The assumption is that you are recording 16 bit PCM MONO audio (which according to your code, you are). The rawToWave function just neatly adds headers to the WAV file, so that music players know what to expect when your file is opened, and then after the headers, it just writes the PCM data from the last bit onwards.
Cool Tip
If you want to shift the pitch of your voice, or make a voice changer app, all you got to do is increase/decrease the value of writeInt(output, 44100); // sample rate in your code. Decreasing it will tell the player to play it at a different rate thereby changing the output pitch. Just a little extra 'good to know' thing. :)
I know it is late and you got your stuff working with MediaRecorder. But thought of sharing my answer as it took me some good time to find it. :)
When you record your audio, the data is read as short from your AudioRecord object and it is then converted to bytes before storing in the .pcm file.
Now, when you write the .wav file, you're again doing the short conversion. This is not required. So, in your code if you remove the following block and write the rawData directly to the end of .wav file. It will work just fine.
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
bytes.putShort(s);
}
Check the below piece of code you'll get after removing the duplicate block of code.
writeInt(output, rawData.length); // subchunk 2 size
// removed the duplicate short conversion
output.write(rawData);
Just to register, I solved my need of recording an audio playable in common players using MediaRecorder instead of Audio Recorder.
To start recording:
MediaRecorder mRecorder = new MediaRecorder();
mRecorder.setAudioSource(MediaRecorder.AudioSource.MIC);
mRecorder.setOutputFormat(MediaRecorder.OutputFormat.THREE_GPP);
mRecorder.setAudioEncoder(MediaRecorder.OutputFormat.AMR_NB);
mRecorder.setOutputFile(Environment.getExternalStorageDirectory()
.getAbsolutePath() + "/recording.3gp");
mRecorder.prepare();
mRecorder.start();
And to play the recording:
mPlayer = new MediaPlayer();
mPlayer.setDataSource(Environment.getExternalStorageDirectory()
.getAbsolutePath() + "/recording.3gp");
mPlayer.prepare();
mPlayer.start();
I tried the above code for audio recording writeAudioDataToFile(). It perfectly records and convert the audio into .wav format. But when I played the recorded audio, it was too fast. 5sec audio completed in 2.5 sec. Then I observed it was because of this short2byte() function.
For those how are having same problem should not use short2byte() and directly write sData in line os.write(sData, 0, Constants.BufferElements2Rec * Constants.BytesPerElement); where sData should be byte[].
I read these 2 articles:
Writing PCM recorded data into a .wav file (java android)
http://soundfile.sapp.org/doc/WaveFormat/
And created my program that converts .pcm file into .wav file. But when I open it with VLC, it doesn't play it. I have pcm file that works perfect so I don't know why my program doesn't work. I am putting all fields(except strings) inside a file in little-endian format with reverseBytes() function. What did I do wrong?
EDIT: Code works now
public class MainActivity extends AppCompatActivity {
private String pahtFile = "/sdcard/voice8K16bitmono.pcm";
#Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
try {
byte[] clipData = getBytesFromFile(new File(pahtFile));
myDatasize = clipData.length; //262 144 bytes in my PCM file
/** Data for creating "The canonical WAVE file format"
http://soundfile.sapp.org/doc/WaveFormat/
*/
int BitsPerSample = 16;
String ChunkID = "RIFF";
String Format = "WAVE";
String Subchunk1ID = "fmt ";
int mySubChunk1Size = 16;
short AudioFormat = 1;
short NumChannels = 1;
int SampleRate = 44100;
int ByteRate = SampleRate*((int)NumChannels)*BitsPerSample/8;
short BlockAlign = (short) (NumChannels*((short)(BitsPerSample/8)));
String Subchunk2ID = "data";
int Subchunk2Size = (int)myDatasize;
int chunkSize = 36 + Subchunk2Size;
Log.d("SEBALOG","BlockAlign: " + BlockAlign);
OutputStream os;
os = new FileOutputStream(new File("/sdcard/firstWav.wav"));
BufferedOutputStream bos = new BufferedOutputStream(os);
DataOutputStream outFile = new DataOutputStream(bos);
outFile.writeBytes(ChunkID); // The riff chunk descriptor
outFile.writeInt(Integer.reverseBytes(chunkSize));
outFile.writeBytes(Format);
outFile.writeBytes(Subchunk1ID); // The fmt sub-chunk
outFile.writeInt(Integer.reverseBytes(mySubChunk1Size));
outFile.writeShort(Short.reverseBytes(AudioFormat));
outFile.writeShort(Short.reverseBytes(NumChannels));
outFile.writeInt(Integer.reverseBytes(SampleRate));
outFile.writeInt(Integer.reverseBytes(ByteRate));
outFile.writeShort(Short.reverseBytes(BlockAlign));
outFile.writeShort(Short.reverseBytes((short) BitsPerSample));
outFile.writeBytes(Subchunk2ID); // The "data" sub-chunk
outFile.writeInt(Integer.reverseBytes(Subchunk2Size));
outFile.write(clipData);
outFile.flush();
outFile.close();
} catch (IOException e) {
e.printStackTrace();
}
}
private byte[] getBytesFromFile(File file) throws IOException{
InputStream is = new FileInputStream(file);
// Get the size of the file
long length = file.length();
// You cannot create an array using a long type.
// It needs to be an int type.
// Before converting to an int type, check
// to ensure that file is not larger than Integer.MAX_VALUE.
if (length > Integer.MAX_VALUE) {
// File is too large
}
// Create the byte array to hold the data
byte[] bytes = new byte[(int)length];
// Read in the bytes
int offset = 0;
int numRead = 0;
while (offset < bytes.length
&& (numRead = is.read(bytes, offset, Math.min(bytes.length - offset, 512*1024))) >= 0) {
offset += numRead;
}
// Ensure all the bytes have been read in
if (offset < bytes.length) {
throw new IOException("Could not completely read file "+file.getName());
}
// Close the input stream and return bytes
is.close();
return bytes;
}
}
I am using this FFT library to perform the FFT on the sound caught by device microphone. I am using following code:
int bufferSize = AudioRecord.getMinBufferSize(frequency,
channelConfiguration, audioEncoding);
AudioRecord audioRecord = new AudioRecord(MediaRecorder.AudioSource.DEFAULT,
frequency, channelConfiguration, audioEncoding, bufferSize);
int bufferReadResult;
try {
audioRecord.startRecording();
} catch (IllegalStateException e) {
e.printStackTrace();
Log.e("Recording failed", e.toString());
}
while (true) {
bufferReadResult = audioRecord.read(buffer, 0, blockSize);
if (isCancelled())
break;
boolean bln = false;
for (int i = 0; i < blockSize && i < bufferReadResult; i++) {
toTransform[i] = (double) buffer[i] / 32768.0;
if (buffer[i] != 0) {
bln = true;
}
}
if (bln) {
transformer.ft(toTransform);
doStuff(toTransform);
} else {
try {
audioRecord = new AudioRecord(
MediaRecorder.AudioSource.DEFAULT, frequency,
channelConfiguration, audioEncoding, bufferSize);
audioRecord.startRecording();
} catch (IllegalStateException e) {
e.printStackTrace();
Log.e("Recording failed", e.toString());
}
}
if (isCancelled())
break;
}
try {
audioRecord.stop();
} catch (IllegalStateException e) {
e.printStackTrace();
Log.e("Stop failed", e.toString());
}
It works fine for microphone. But I want to do this transformation on sound file(in raw folder). I tried using InputStreamReader to read the sound file content, but how to use this file data to apply transformation on file? I tried using a buffer using following code:
InputStream ins;
ByteArrayOutputStream outputStream;
int size = 0;
byte[] buffer = new byte[1024];
ins = getResources().openRawResource(R.raw.fire);
outputStream = new ByteArrayOutputStream();
transformer = new RealDoubleFFT(blockSize);
while ((size = ins.read(buffer, 0, 1024)) >= 0) {
outputStream.write(buffer, 0, size);
if (isCancelled())
break;
boolean bln = false;
for (int i = 0; i < buffer.length; i++) {
toTransform[i] = (double) buffer[i] / 32768.0;
if (buffer[i] != 0) {
bln = true;
}
}
if (bln) {
transformer.ft(toTransform);
doStuff(toTransform);
}
}
ins.close();
But it is not returning the correct data. Maybe the data received from file is not what the library wants. Any ideas how to apply FFT on sound file?
You appear to be converting single bytes, but the format of a .wav/RIFF file, after the header (often 44 bytes), is usually 16-bit or 2-byte little-endian samples.
I want to create an audio mixer (DJ music track) kind of app which can create Dj mixer of an audio song. User can select a music song track that can be mixed with two or more separate rhythm, bass or beat tracks to create a new modified Dj music.
I did a lot of research over this but could not find any idea or clue.
If anyone have some idea or some reference URL regarding this, please share it.
There is no build-in library on Android that supports audio mixing (combining two audio input streams into one output stream). The Java javax.sound library which supports audio mixing was not ported to Android - there's an interesting discussion on Google Groups with Google engineer Diane Hackborn about the decision to not port javax.sound to Android.
It looks like you have to develop your own solution from scratch. There are several helpful answers on SO on how to combine two audio streams into one:
Mixing Audio Files
Audio editing in Android
Android - Mixing multiple static waveforms into a single AudioTrack
It sounds like the hardest part of this would be playing multiple tracks at once, and that the rest can be done with the UI. One link that might help you is How to play multiple ogg or mp3 at the same time..? The documentation for SoundPool, which lets you play multiple sounds at once, can be found here.
It is late but if someone needs, AudioMixer-android can be used.
File dir;
dir = new File(Environment.getExternalStorageDirectory().getAbsolutePath());
dir.mkdirs();
//Audio Mixer for two .raw file into single .wav file...
void AudioMixer() {
File file_play1 = new File(dir, "Neww.raw");
int shortSizeInBytes = Short.SIZE / Byte.SIZE;
int bufferSizeInBytes = (int) (file_play1.length() / shortSizeInBytes);
short[] audioData = new short[bufferSizeInBytes];
try {
InputStream inputStream = new FileInputStream(file_play1);
BufferedInputStream bufferedInputStream = new BufferedInputStream(inputStream);
DataInputStream dataInputStream = new DataInputStream(bufferedInputStream);
InputStream inputStream1 = getResources().openRawResource(R.raw.trainss); //Play form raw folder
BufferedInputStream bufferedInputStream1 = new BufferedInputStream(inputStream1);
DataInputStream dataInputStream1 = new DataInputStream(bufferedInputStream1);
int i = 0;
while (dataInputStream.available() > 0 && dataInputStream1.available() > 0) {
audioData[i] = (short) (dataInputStream.readShort() + dataInputStream1.readShort());
i++;
}
dataInputStream.close();
dataInputStream1.close();
AudioTrack audioTrack = new AudioTrack(
AudioManager.STREAM_MUSIC,
11025,
AudioFormat.CHANNEL_CONFIGURATION_MONO,
AudioFormat.ENCODING_PCM_16BIT,
bufferSizeInBytes,
AudioTrack.MODE_STREAM);
audioTrack.write(audioData, 0, bufferSizeInBytes);
//merge two .raw files in single .raw file...
File file_record = new File(dir, "testing.raw");
try {
OutputStream outputStream = new FileOutputStream(file_record);
BufferedOutputStream bufferedOutputStream = new BufferedOutputStream(outputStream);
DataOutputStream dataOutputStream = new DataOutputStream(bufferedOutputStream);
for (int j = 0; j < audioData.length; j++) {
dataOutputStream.writeShort(audioData[j]);
}
dataOutputStream.close();
} catch (IOException e) {
e.printStackTrace();
}
//Convert that .raw (testing.raw) file into .wav (testingNew.wav) file
File des = new File(dir, "testingNew.wav");
try {
rawToWave(file_record, des);
} catch (IOException e) {
e.printStackTrace();
}
} catch (FileNotFoundException e) {
e.printStackTrace();
} catch (IOException e) {
e.printStackTrace();
} catch (IllegalStateException e) {
e.printStackTrace();
}
}
//convert .raw file to .wav File...
private void rawToWave(final File rawFile, final File waveFile) throws IOException {
byte[] rawData = new byte[(int) rawFile.length()];
DataInputStream input = null;
try {
input = new DataInputStream(new FileInputStream(rawFile));
input.read(rawData);
} finally {
if (input != null) {
input.close();
}
}
DataOutputStream output = null;
try {
output = new DataOutputStream(new FileOutputStream(waveFile));
// WAVE header
writeString(output, "RIFF"); // chunk id
writeInt(output, 36 + rawData.length); // chunk size
writeString(output, "WAVE"); // format
writeString(output, "fmt "); // subchunk 1 id
writeInt(output, 16); // subchunk 1 size
writeShort(output, (short) 1); // audio format (1 = PCM)
writeShort(output, (short) 1); // number of channels
writeInt(output, SAMPLE_RATE); // sample rate
writeInt(output, SAMPLE_RATE * 2); // byte rate
writeShort(output, (short) 2); // block align
writeShort(output, (short) 16); // bits per sample
writeString(output, "data"); // subchunk 2 id
writeInt(output, rawData.length); // subchunk 2 size
// Audio data (conversion big endian -> little endian)
short[] shorts = new short[rawData.length / 2];
ByteBuffer.wrap(rawData).order(ByteOrder.LITTLE_ENDIAN).asShortBuffer().get(shorts);
ByteBuffer bytes = ByteBuffer.allocate(shorts.length * 2);
for (short s : shorts) {
bytes.putShort(s);
}
output.write(bytes.array());
} finally {
if (output != null) {
output.close();
}
}
}
private void writeInt(final DataOutputStream output, final int value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
output.write(value >> 16);
output.write(value >> 24);
}
private void writeShort(final DataOutputStream output, final short value) throws IOException {
output.write(value >> 0);
output.write(value >> 8);
}
private void writeString(final DataOutputStream output, final String value) throws IOException {
for (int i = 0; i < value.length(); i++) {
output.write(value.charAt(i));
}
}
//playing merged file...
private void playWavFile() {
MediaPlayer recorded_audio_in_sounds = new MediaPlayer();
String outputFile = Environment.getExternalStorageDirectory().getAbsolutePath() + "/testingNew.wav";
try {
if (recorded_audio_in_sounds != null) {
if (recorded_audio_in_sounds.isPlaying()) {
recorded_audio_in_sounds.pause();
recorded_audio_in_sounds.stop();
recorded_audio_in_sounds.reset();
recorded_audio_in_sounds.setDataSource(outputFile);
recorded_audio_in_sounds.prepare();
recorded_audio_in_sounds.setAudioStreamType(AudioManager.STREAM_MUSIC);
recorded_audio_in_sounds.start();
recorded_audio_in_sounds.start();
} else {
recorded_audio_in_sounds.reset();
recorded_audio_in_sounds.setDataSource(outputFile);
recorded_audio_in_sounds.prepare();
recorded_audio_in_sounds.start();
recorded_audio_in_sounds.setVolume(2.0f, 2.0f);
}
}
} catch (Exception e) {
e.printStackTrace();
}
}