How to merge audio and video using MediaMuxer on Android?

I'm developing an Android app that has an MP3 audio file and an MP4 video file (with no sound).
Now I want to mux them into a new MP4 video file (with sound). Since Android 4.3, Google suggests using the MediaMuxer class to mux audio and video streams. I have tried many times without success.
Here is the code:
private void muxing() {
    String outputFile = "";
    try {
        File file = new File(Environment.getExternalStorageDirectory() + "/final2.mp4");
        file.createNewFile();
        outputFile = file.getAbsolutePath();

        MediaExtractor videoExtractor = new MediaExtractor();
        videoExtractor.setDataSource(Environment.getExternalStorageDirectory().toString() + "/vd1.h264");
        MediaExtractor audioExtractor = new MediaExtractor();
        audioExtractor.setDataSource(Environment.getExternalStorageDirectory() + "/audioVideoDir/audio.m4a");

        MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        videoExtractor.selectTrack(0);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
        int videoTrack = muxer.addTrack(videoFormat);

        audioExtractor.selectTrack(0);
        MediaFormat audioFormat = audioExtractor.getTrackFormat(0);
        int audioTrack = muxer.addTrack(audioFormat);

        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        int sampleSize = 256 * 1024;
        ByteBuffer videoBuf = ByteBuffer.allocate(sampleSize);
        ByteBuffer audioBuf = ByteBuffer.allocate(sampleSize);
        MediaCodec.BufferInfo videoBufferInfo = new MediaCodec.BufferInfo();
        MediaCodec.BufferInfo audioBufferInfo = new MediaCodec.BufferInfo();

        videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);

        muxer.start();

        while (!sawEOS) {
            videoBufferInfo.offset = offset;
            videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);
            if (videoBufferInfo.size < 0 || audioBufferInfo.size < 0) {
                // Log.d(TAG, "saw input EOS.");
                sawEOS = true;
                videoBufferInfo.size = 0;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
                videoExtractor.advance();
                frameCount++;
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount, Toast.LENGTH_SHORT).show();

        boolean sawEOS2 = false;
        int frameCount2 = 0;
        while (!sawEOS2) {
            frameCount2++;
            audioBufferInfo.offset = offset;
            audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset);
            if (videoBufferInfo.size < 0 || audioBufferInfo.size < 0) {
                // Log.d(TAG, "saw input EOS.");
                sawEOS2 = true;
                audioBufferInfo.size = 0;
            } else {
                audioBufferInfo.presentationTimeUs = audioExtractor.getSampleTime();
                audioBufferInfo.flags = audioExtractor.getSampleFlags();
                muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
                audioExtractor.advance();
            }
        }

        Toast.makeText(getApplicationContext(), "frame:" + frameCount2, Toast.LENGTH_SHORT).show();

        muxer.stop();
        muxer.release();
    } catch (IOException e) {
    } catch (Exception e) {
    }
}
I get the error below:
Unknown mime type 'audio/mpeg'. 12-09 11:58:33.569: A/MPEG4Writer(332): frameworks/av/media/libstagefright/MPEG4Writer.cpp:2699 CHECK(!"should not be here, unknown mime type.")
Is there any solution to my issue with Google's MediaMuxer API? I also tried FFmpeg: after merging the audio and video together, the resulting video doesn't play in the Android device's default video player, but it does load in VLC.
I followed the FFmpeg approach from here: https://github.com/WritingMinds/ffmpeg-android-java

Try adding an MP4 video file instead of the raw .h264 stream.
It's working for me, but the output file takes on the audio's duration.
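If the audio track is longer than the video, one way to keep the output at the video's length is to stop copying audio samples once their timestamps pass the video's duration. A minimal sketch against the question's own variables, assuming the video track format reports KEY_DURATION (the durationUs field visible in the format dumps further down this page):

    long videoDurationUs = videoFormat.getLong(MediaFormat.KEY_DURATION);

    boolean audioDone = false;
    while (!audioDone) {
        audioBufferInfo.offset = 0;
        audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, 0);
        long ptsUs = audioExtractor.getSampleTime();
        if (audioBufferInfo.size < 0 || ptsUs > videoDurationUs) {
            audioDone = true;                      // out of audio, or past the video's end
        } else {
            audioBufferInfo.presentationTimeUs = ptsUs;
            audioBufferInfo.flags = audioExtractor.getSampleFlags();
            muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
            audioExtractor.advance();
        }
    }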

Related

PCM to MP3 using MediaCodec API Android

I am getting PCM data from the microphone using the AudioRecord class. How do I encode it to MP3 using MediaCodec on Android?
Yes, it is possible; see the MediaCodec reference.
The code below will convert PCM to M4A; you would have to modify the muxer output format for MP3.
private void convertAudio(String filename) throws IOException {
    String outputpath = Environment.getExternalStoragePublicDirectory(Environment.DIRECTORY_MUSIC).getPath() + "/converted.m4a";

    // Set up MediaExtractor to read from the source.
    MediaExtractor extractor = new MediaExtractor();
    extractor.setDataSource(filename);
    int trackCount = extractor.getTrackCount();

    // Set up MediaMuxer for the destination.
    MediaMuxer muxer;
    muxer = new MediaMuxer(outputpath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

    // Set up the tracks.
    HashMap<Integer, Integer> indexMap = new HashMap<Integer, Integer>(trackCount);
    for (int i = 0; i < trackCount; i++) {
        extractor.selectTrack(i);
        MediaFormat format = extractor.getTrackFormat(i);
        format.setString(MediaFormat.KEY_MIME, MediaFormat.MIMETYPE_AUDIO_AMR_NB);
        int dstIndex = muxer.addTrack(format);
        indexMap.put(i, dstIndex);
    }

    // Copy the samples from MediaExtractor to MediaMuxer.
    boolean sawEOS = false;
    int bufferSize = 32000;
    int frameCount = 0;
    int offset = 100;
    ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);
    MediaCodec.BufferInfo bufferInfo = new MediaCodec.BufferInfo();
    /* if (degrees >= 0) {
        muxer.setOrientationHint(degrees);
    } */

    muxer.start();
    while (!sawEOS) {
        bufferInfo.offset = offset;
        bufferInfo.size = extractor.readSampleData(dstBuf, offset);
        if (bufferInfo.size < 0) {
            sawEOS = true;
            bufferInfo.size = 0;
        } else {
            bufferInfo.presentationTimeUs = extractor.getSampleTime();
            bufferInfo.flags = extractor.getSampleFlags();
            int trackIndex = extractor.getSampleTrackIndex();
            muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);
            extractor.advance();
            frameCount++;
        }
    }

    muxer.stop();
    muxer.release();
    return;
}
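Note that the snippet above only remuxes samples that are already compressed; it never encodes raw PCM, and MediaMuxer has no MP3 output format at all. A hedged sketch of the encoding step the question actually asks about, using MediaCodec's AAC encoder to produce an .m4a (the method and path names are mine, the imports are the same android.media/java.io/java.nio ones as the snippets above, and it assumes 16-bit stereo PCM at 44.1 kHz):

    private void encodePcmToM4a(String pcmPath, String outPath) throws IOException {
        final int sampleRate = 44100;
        final int channels = 2;                       // assumed PCM layout
        MediaFormat fmt = MediaFormat.createAudioFormat(
                MediaFormat.MIMETYPE_AUDIO_AAC, sampleRate, channels);
        fmt.setInteger(MediaFormat.KEY_AAC_PROFILE,
                MediaCodecInfo.CodecProfileLevel.AACObjectLC);
        fmt.setInteger(MediaFormat.KEY_BIT_RATE, 128000);

        MediaCodec enc = MediaCodec.createEncoderByType(MediaFormat.MIMETYPE_AUDIO_AAC);
        enc.configure(fmt, null, null, MediaCodec.CONFIGURE_FLAG_ENCODE);
        enc.start();

        MediaMuxer muxer = new MediaMuxer(outPath, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);
        FileInputStream pcm = new FileInputStream(pcmPath);
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        byte[] chunk = new byte[8192];
        int track = -1;
        long ptsUs = 0;
        boolean inputDone = false;
        boolean outputDone = false;

        while (!outputDone) {
            if (!inputDone) {
                int in = enc.dequeueInputBuffer(10000);
                if (in >= 0) {
                    ByteBuffer buf = enc.getInputBuffer(in);    // API 21+
                    int read = pcm.read(chunk);
                    if (read < 0) {
                        enc.queueInputBuffer(in, 0, 0, ptsUs,
                                MediaCodec.BUFFER_FLAG_END_OF_STREAM);
                        inputDone = true;
                    } else {
                        buf.clear();
                        buf.put(chunk, 0, read);
                        enc.queueInputBuffer(in, 0, read, ptsUs, 0);
                        // 16-bit PCM: bytes -> frames -> microseconds.
                        ptsUs += 1000000L * (read / (2 * channels)) / sampleRate;
                    }
                }
            }

            int out = enc.dequeueOutputBuffer(info, 10000);
            if (out == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
                // Encoders report the real output format (with csd-0) before any data.
                track = muxer.addTrack(enc.getOutputFormat());
                muxer.start();
            } else if (out >= 0) {
                if (info.size > 0 && (info.flags & MediaCodec.BUFFER_FLAG_CODEC_CONFIG) == 0) {
                    muxer.writeSampleData(track, enc.getOutputBuffer(out), info);
                }
                if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) != 0) {
                    outputDone = true;
                }
                enc.releaseOutputBuffer(out, false);
            }
        }

        pcm.close();
        enc.stop();
        enc.release();
        muxer.stop();
        muxer.release();
    }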

Android MediaExtractor readSampleData IllegalArgumentException

I am trying to follow this question, Concatenate multiple mp4 audio files using android's MediaMuxer, in order to concatenate video files.
But for some reason, MediaExtractor throws an IllegalArgumentException when I call readSampleData.
According to the official API, this function shouldn't throw any exceptions at all! What's going on?
I found an old question suggesting that the size of the ByteBuffer might be responsible: Android MediaMuxer readSampleData IllegalStateException
I have tried tons of values for the size and none of them made the issue go away. Is there a standard size I should know about?
Any hints would help!
boolean VERBOSE = true;

private boolean concatenateFiles(File dst, List<File> sources) {
    if ((sources == null) || (sources.size() == 0)) {
        return false;
    }
    boolean result;
    MediaExtractor extractor = null;
    MediaMuxer muxer = null;
    try {
        // Set up MediaMuxer for the destination.
        muxer = new MediaMuxer(dst.getPath(), MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        // Copy the samples from MediaExtractor to MediaMuxer.
        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        ByteBuffer dstBuf = ByteBuffer.allocate(256 * 1024);
        BufferInfo bufferInfo = new BufferInfo();
        long timeOffsetUs = 0;
        int dstTrackIndex = -1;

        for (int fileIndex = 0; fileIndex < sources.size(); fileIndex++) {
            int numberOfSamplesInSource = getNumberOfSamples(sources.get(fileIndex));
            if (VERBOSE) {
                Log.d(TAG, String.format("Source file: %s", sources.get(fileIndex).getPath()));
            }
            if (!sources.get(fileIndex).canRead()) {
                throw new FileNotFoundException("Unable to read " + sources.get(fileIndex));
            }

            // Set up MediaExtractor to read from the source.
            extractor = new MediaExtractor();
            extractor.setDataSource(sources.get(fileIndex).getPath());

            // Set up the tracks.
            SparseIntArray indexMap = new SparseIntArray(extractor.getTrackCount());
            for (int i = 0; i < extractor.getTrackCount(); i++) {
                extractor.selectTrack(i);
                MediaFormat format = extractor.getTrackFormat(i);
                if (dstTrackIndex < 0) {
                    dstTrackIndex = muxer.addTrack(format);
                    muxer.start();
                }
                indexMap.put(i, dstTrackIndex);
            }

            long lastPresentationTimeUs = 0;
            int currentSample = 0;

            while (!sawEOS) {
                bufferInfo.offset = offset;
                bufferInfo.size = extractor.readSampleData(dstBuf, offset);
                if (bufferInfo.size < 0) {
                    sawEOS = true;
                    bufferInfo.size = 0;
                    timeOffsetUs += (lastPresentationTimeUs);
                } else {
                    lastPresentationTimeUs = extractor.getSampleTime();
                    bufferInfo.presentationTimeUs = extractor.getSampleTime() + timeOffsetUs;
                    bufferInfo.flags = extractor.getSampleFlags();
                    int trackIndex = extractor.getSampleTrackIndex();
                    if ((currentSample < numberOfSamplesInSource) || (fileIndex == sources.size() - 1)) {
                        muxer.writeSampleData(indexMap.get(trackIndex), dstBuf, bufferInfo);
                    }
                    extractor.advance();
                    frameCount++;
                    currentSample++;
                    if (VERBOSE) {
                        Log.d(TAG, "Frame (" + frameCount + ") " +
                                "PresentationTimeUs:" + bufferInfo.presentationTimeUs +
                                " Flags:" + bufferInfo.flags +
                                " TrackIndex:" + trackIndex +
                                " Size(KB) " + bufferInfo.size / 1024);
                    }
                }
            }
            extractor.release();
            extractor = null;
        }

        result = true;
    } catch (Exception e) {
        e.printStackTrace();
        result = false;
    } finally {
        if (extractor != null) {
            extractor.release();
        }
        if (muxer != null) {
            muxer.stop();
            muxer.release();
        }
    }
    return result;
}
I just had this error when decoding a 4K movie. Other movies (including other very high resolution ones) had been working fine, but about halfway through, coincidentally near a more complex scene, I got this error.
My buffer size was 1 MB (1024 * 1024); increasing it to 2 MB made the problem go away.
So the END_OF_STREAM you find in the source is presumably "end of the buffer".
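Rather than guessing at a size, the track format usually advertises the largest sample the extractor will produce. A minimal sketch, assuming the format includes KEY_MAX_INPUT_SIZE (the max-input-size field visible in the format dumps elsewhere on this page):

    // Size the buffer from the track format instead of a hard-coded guess.
    MediaFormat format = extractor.getTrackFormat(trackIndex);
    int bufferSize = format.containsKey(MediaFormat.KEY_MAX_INPUT_SIZE)
            ? format.getInteger(MediaFormat.KEY_MAX_INPUT_SIZE)   // extractor's own upper bound
            : 2 * 1024 * 1024;                                    // fallback: the 2 MB that worked above
    ByteBuffer dstBuf = ByteBuffer.allocate(bufferSize);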

Create MP4 container for .ts segment

I want to play static HLS content (not live video content) in my app on Android. What I currently do is download all the segments from the .m3u8 file and merge them into one file. When I play this file, I can see the video being played, but it is not seekable. As per this link, .ts files are not seekable on Android.
I cannot risk running ffmpeg on the phone to convert the file to MP4 format. I have studied the MP4 format and its atom structure. What I want to know is whether there is an easy way to create an MP4 container (atom hierarchy) that would simply refer to the .ts segment (the merged segment created from the sub-segments) in its data atom (mdat).
I would really appreciate any help/suggestions.
Not possible without a copy. TS uses 188-byte packets with headers. These headers create breaks in the middle of frames. In MP4, frames must be contiguous.
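That copy does not have to re-encode anything, though. A hedged sketch (the method name is mine): let MediaExtractor parse the merged .ts and have MediaMuxer rewrite the same compressed samples into an .mp4, assuming the device's extractor accepts MPEG-TS input (H.264 + AAC in .ts is a documented Android media format):

    private void remuxTsToMp4(String tsPath, String mp4Path) throws IOException {
        MediaExtractor extractor = new MediaExtractor();
        extractor.setDataSource(tsPath);
        MediaMuxer muxer = new MediaMuxer(mp4Path, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        // Mirror every .ts track into the .mp4.
        int trackCount = extractor.getTrackCount();
        int[] dstIndex = new int[trackCount];
        for (int i = 0; i < trackCount; i++) {
            extractor.selectTrack(i);
            dstIndex[i] = muxer.addTrack(extractor.getTrackFormat(i));
        }
        muxer.start();

        // Straight sample copy: compressed frames in, compressed frames out.
        ByteBuffer buf = ByteBuffer.allocate(2 * 1024 * 1024);
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        while (true) {
            info.offset = 0;
            info.size = extractor.readSampleData(buf, 0);
            if (info.size < 0) {
                break;                                   // end of stream
            }
            info.presentationTimeUs = extractor.getSampleTime();
            info.flags = extractor.getSampleFlags();
            muxer.writeSampleData(dstIndex[extractor.getSampleTrackIndex()], buf, info);
            extractor.advance();
        }

        extractor.release();
        muxer.stop();
        muxer.release();
    }

The result is a real MP4 with a proper sample table, so it should be seekable in a way the concatenated .ts never can be.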
Android provides classes such as MediaCodec and MediaExtractor that give access to low-level media encoding/decoding. They are fast and efficient because they use hardware acceleration.
Here's how I believe one is supposed to do it on Android, unless you are OK with using ffmpeg, which of course is a resource-intensive operation.
1) Use MediaExtractor to extract data from the file.
2) Pass the extracted data to MediaCodec.
3) Use MediaCodec to render output to a surface (in the case of video) or an AudioTrack (in the case of audio).
4) This is the most difficult step: synchronize audio/video. I haven't implemented this yet, but it would require keeping track of the time sync between audio and video. Audio would be played normally, and you might have to drop some video frames to keep the video in sync with the audio playback (a sketch follows the code below).
Here's the code for decoding audio and video and playing them using a Surface and an AudioTrack, respectively.
In the case of video decoding, there's a sleep to slow down the frame rendering.
public void decodeVideo(Surface surface) throws IOException {
    MediaExtractor extractor = new MediaExtractor();
    MediaCodec codec;
    ByteBuffer[] codecInputBuffers;
    ByteBuffer[] codecOutputBuffers;

    extractor.setDataSource(file);
    Log.d(TAG, "No of tracks = " + extractor.getTrackCount());
    MediaFormat format = extractor.getTrackFormat(0);
    String mime = format.getString(MediaFormat.KEY_MIME);
    Log.d(TAG, "mime = " + mime);
    Log.d(TAG, "format = " + format);

    codec = MediaCodec.createDecoderByType(mime);
    codec.configure(format, surface, null, 0);
    codec.start();
    codecInputBuffers = codec.getInputBuffers();
    codecOutputBuffers = codec.getOutputBuffers();

    extractor.selectTrack(0);

    final long timeout_in_Us = 5000;
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    boolean sawInputEOS = false;
    boolean sawOutputEOS = false;
    int noOutputCounter = 0;
    long startMs = System.currentTimeMillis();

    while (!sawOutputEOS && noOutputCounter < 50) {
        noOutputCounter++;
        if (!sawInputEOS) {
            int inputBufIndex = codec.dequeueInputBuffer(timeout_in_Us);
            if (inputBufIndex >= 0) {
                ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
                int sampleSize = extractor.readSampleData(dstBuf, 0);
                long presentationTimeUs = 0;
                if (sampleSize < 0) {
                    Log.d(TAG, "saw input EOS.");
                    sawInputEOS = true;
                    sampleSize = 0;
                } else {
                    presentationTimeUs = extractor.getSampleTime();
                }
                codec.queueInputBuffer(inputBufIndex, 0, sampleSize, presentationTimeUs,
                        sawInputEOS ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
                if (!sawInputEOS) {
                    extractor.advance();
                }
            }
        }

        int res = codec.dequeueOutputBuffer(info, timeout_in_Us);
        if (res >= 0) {
            if (info.size > 0) {
                noOutputCounter = 0;
            }
            int outputBufIndex = res;
            while (info.presentationTimeUs / 1000 > System.currentTimeMillis() - startMs) {
                try {
                    Thread.sleep(5);
                } catch (Exception e) {
                    break;
                }
            }
            codec.releaseOutputBuffer(outputBufIndex, true);
            if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) == MediaCodec.BUFFER_FLAG_END_OF_STREAM) {
                Log.d(TAG, "saw output EOS.");
                sawOutputEOS = true;
            }
        } else if (res == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            codecOutputBuffers = codec.getOutputBuffers();
            Log.d(TAG, "output buffers have changed.");
        } else if (res == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            MediaFormat format1 = codec.getOutputFormat();
            Log.d(TAG, "output format has changed to " + format1);
        } else if (res == MediaCodec.INFO_TRY_AGAIN_LATER) {
            Log.d(TAG, "Codec try again returned " + res);
        }
    }

    codec.stop();
    codec.release();
}

private int audioSessionId = -1;

private AudioTrack createAudioTrack(MediaFormat format) {
    int channelConfiguration = format.getInteger(MediaFormat.KEY_CHANNEL_COUNT) == 1
            ? AudioFormat.CHANNEL_OUT_MONO : AudioFormat.CHANNEL_OUT_STEREO;
    int bufferSize = AudioTrack.getMinBufferSize(format.getInteger(MediaFormat.KEY_SAMPLE_RATE),
            channelConfiguration, AudioFormat.ENCODING_PCM_16BIT) * 8;
    AudioTrack audioTrack;
    if (audioSessionId == -1) {
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                format.getInteger(MediaFormat.KEY_SAMPLE_RATE), channelConfiguration,
                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM);
    } else {
        audioTrack = new AudioTrack(AudioManager.STREAM_MUSIC,
                format.getInteger(MediaFormat.KEY_SAMPLE_RATE), channelConfiguration,
                AudioFormat.ENCODING_PCM_16BIT, bufferSize, AudioTrack.MODE_STREAM, audioSessionId);
    }
    audioTrack.play();
    audioSessionId = audioTrack.getAudioSessionId();
    return audioTrack;
}

public void decodeAudio() throws IOException {
    MediaExtractor extractor = new MediaExtractor();
    MediaCodec codec;
    ByteBuffer[] codecInputBuffers;
    ByteBuffer[] codecOutputBuffers;

    extractor.setDataSource(file);
    Log.d(TAG, "No of tracks = " + extractor.getTrackCount());
    MediaFormat format = extractor.getTrackFormat(1);
    String mime = format.getString(MediaFormat.KEY_MIME);
    Log.d(TAG, "mime = " + mime);
    Log.d(TAG, "format = " + format);

    codec = MediaCodec.createDecoderByType(mime);
    codec.configure(format, null, null, 0);
    codec.start();
    codecInputBuffers = codec.getInputBuffers();
    codecOutputBuffers = codec.getOutputBuffers();

    extractor.selectTrack(1);

    AudioTrack audioTrack = createAudioTrack(format);

    final long timeout_in_Us = 5000;
    MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
    boolean sawInputEOS = false;
    boolean sawOutputEOS = false;
    int noOutputCounter = 0;

    while (!sawOutputEOS && noOutputCounter < 50) {
        noOutputCounter++;
        if (!sawInputEOS) {
            int inputBufIndex = codec.dequeueInputBuffer(timeout_in_Us);
            if (inputBufIndex >= 0) {
                ByteBuffer dstBuf = codecInputBuffers[inputBufIndex];
                int sampleSize = extractor.readSampleData(dstBuf, 0);
                long presentationTimeUs = 0;
                if (sampleSize < 0) {
                    Log.d(TAG, "saw input EOS.");
                    sawInputEOS = true;
                    sampleSize = 0;
                } else {
                    presentationTimeUs = extractor.getSampleTime();
                }
                codec.queueInputBuffer(inputBufIndex, 0, sampleSize, presentationTimeUs,
                        sawInputEOS ? MediaCodec.BUFFER_FLAG_END_OF_STREAM : 0);
                if (!sawInputEOS) {
                    extractor.advance();
                }
            }
        }

        int res = codec.dequeueOutputBuffer(info, timeout_in_Us);
        if (res >= 0) {
            if (info.size > 0) {
                noOutputCounter = 0;
            }
            int outputBufIndex = res;
            // Possibly store the decoded buffer
            ByteBuffer buf = codecOutputBuffers[outputBufIndex];
            final byte[] chunk = new byte[info.size];
            buf.get(chunk);
            buf.clear();
            if (chunk.length > 0) {
                audioTrack.write(chunk, 0, chunk.length);
            }
            codec.releaseOutputBuffer(outputBufIndex, false);
            if ((info.flags & MediaCodec.BUFFER_FLAG_END_OF_STREAM) == MediaCodec.BUFFER_FLAG_END_OF_STREAM) {
                Log.d(TAG, "saw output EOS.");
                sawOutputEOS = true;
            }
        } else if (res == MediaCodec.INFO_OUTPUT_BUFFERS_CHANGED) {
            codecOutputBuffers = codec.getOutputBuffers();
            Log.d(TAG, "output buffers have changed.");
        } else if (res == MediaCodec.INFO_OUTPUT_FORMAT_CHANGED) {
            MediaFormat format1 = codec.getOutputFormat();
            Log.d(TAG, "output format has changed to " + format1);
            audioTrack.stop();
            audioTrack = createAudioTrack(codec.getOutputFormat());
        } else if (res == MediaCodec.INFO_TRY_AGAIN_LATER) {
            Log.d(TAG, "Codec try again returned " + res);
        }
    }

    codec.stop();
    codec.release();
}
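Step 4 above (A/V sync) is left unimplemented in the code; here is a minimal sketch of one common approach, assuming the audio is the master clock (the method name and the 100 ms drop threshold are my own choices, not part of the answer; sampleRate is the KEY_SAMPLE_RATE value from the audio format above):

    // Pace a video frame against the audio clock derived from AudioTrack.
    // Returns true if the frame should be rendered, false if it is too late
    // and should be dropped.
    private boolean syncVideoToAudio(AudioTrack audioTrack, int sampleRate, long videoPtsUs) {
        // Frames played so far -> current audio position in microseconds.
        long audioClockUs = 1000000L * audioTrack.getPlaybackHeadPosition() / sampleRate;
        while (videoPtsUs > audioClockUs) {
            // Frame is early: wait in small steps until the audio catches up.
            try {
                Thread.sleep(5);
            } catch (InterruptedException e) {
                break;
            }
            audioClockUs = 1000000L * audioTrack.getPlaybackHeadPosition() / sampleRate;
        }
        // Render only if the frame is less than ~100 ms late; otherwise drop it.
        return videoPtsUs > audioClockUs - 100000;
    }

In decodeVideo, this would replace the System.currentTimeMillis() sleep loop: call it before releasing each output buffer and pass the result as the render flag to releaseOutputBuffer.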

Android MediaMuxer Audio Issue

I am trying to use MediaMuxer to add an audio track to a video. The following code works, but the audio stops halfway through the video. Both the video and audio files have only one track. The playback speeds of the audio and video seem to be fine. The audio file is longer than the video, so I don't think that is the issue. I have been at this for a while now and can't figure it out.
private void createFinalVideo() {
    String outputFile = "";
    try {
        File file = new File(Environment.getExternalStorageDirectory() + File.separator + "final.mp4");
        file.createNewFile();
        outputFile = file.getAbsolutePath();

        MediaExtractor videoExtractor = new MediaExtractor();
        videoExtractor.setDataSource(OUTPUT);
        MediaExtractor audioExtractor = new MediaExtractor();
        final AssetFileDescriptor afd = context.getAssets().openFd("audio.m4a");
        audioExtractor.setDataSource(afd.getFileDescriptor(), afd.getStartOffset(), afd.getLength());

        Log.d(TAG, "Video Extractor Track Count " + videoExtractor.getTrackCount());
        Log.d(TAG, "Audio Extractor Track Count " + audioExtractor.getTrackCount());

        MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

        videoExtractor.selectTrack(0);
        MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
        int videoTrack = muxer.addTrack(videoFormat);

        audioExtractor.selectTrack(0);
        MediaFormat audioFormat = audioExtractor.getTrackFormat(0);
        int audioTrack = muxer.addTrack(audioFormat);

        Log.d(TAG, "Video Format " + videoFormat.toString());
        Log.d(TAG, "Audio Format " + audioFormat.toString());

        boolean sawEOS = false;
        int frameCount = 0;
        int offset = 100;
        int sampleSize = 256 * 1024;
        ByteBuffer videoBuf = ByteBuffer.allocate(sampleSize);
        ByteBuffer audioBuf = ByteBuffer.allocate(sampleSize);
        BufferInfo videoBufferInfo = new BufferInfo();
        BufferInfo audioBufferInfo = new BufferInfo();

        videoExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);
        audioExtractor.seekTo(0, MediaExtractor.SEEK_TO_CLOSEST_SYNC);

        muxer.start();

        while (!sawEOS) {
            videoBufferInfo.offset = offset;
            audioBufferInfo.offset = offset;
            videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);
            audioBufferInfo.size = audioExtractor.readSampleData(audioBuf, offset);

            if (videoBufferInfo.size < 0 || audioBufferInfo.size < 0) {
                Log.d(TAG, "saw input EOS.");
                sawEOS = true;
                videoBufferInfo.size = 0;
                audioBufferInfo.size = 0;
            } else {
                videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
                videoBufferInfo.flags = videoExtractor.getSampleFlags();
                muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
                videoExtractor.advance();

                audioBufferInfo.presentationTimeUs = audioExtractor.getSampleTime();
                audioBufferInfo.flags = audioExtractor.getSampleFlags();
                muxer.writeSampleData(audioTrack, audioBuf, audioBufferInfo);
                audioExtractor.advance();

                frameCount++;
                Log.d(TAG, "Frame (" + frameCount + ") Video PresentationTimeUs:" + videoBufferInfo.presentationTimeUs + " Flags:" + videoBufferInfo.flags + " Size(KB) " + videoBufferInfo.size / 1024);
                Log.d(TAG, "Frame (" + frameCount + ") Audio PresentationTimeUs:" + audioBufferInfo.presentationTimeUs + " Flags:" + audioBufferInfo.flags + " Size(KB) " + audioBufferInfo.size / 1024);
            }
        }

        muxer.stop();
        muxer.release();
    } catch (IOException e) {
        Log.d(TAG, "Mixer Error 1 " + e.getMessage());
    } catch (Exception e) {
        Log.d(TAG, "Mixer Error 2 " + e.getMessage());
    }
    return;
}
The media format printout is the following:
Video Format {max-input-size=1572864, frame-rate=28, height=1920, csd-0=java.nio.ByteArrayBuffer[position=0,limit=18,capacity=18], width=1072, durationUs=2968688, csd-1=java.nio.ByteArrayBuffer[position=0,limit=8,capacity=8], mime=video/avc, isDMCMMExtractor=1}
Audio Format {max-input-size=1572864, encoder-padding=708, aac-profile=2, csd-0=java.nio.ByteArrayBuffer[position=0,limit=2,capacity=2], sample-rate=44100, durationUs=19783401, channel-count=2, encoder-delay=2112, mime=audio/mp4a-latm, isDMCMMExtractor=1}
Any guidance would be highly appreciated.
Thanks
user346443: The issue was that the video extractor was pulling data out quicker than the audio extractor. Splitting the audio and video into separate loops fixed the issue.
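That fix, restated as a hedged helper (the method name is mine): give each extractor its own copy loop, so the track with more samples can't drive the shared EOS test for both.

    // One copy loop per extractor, as the fix describes. Assumes the
    // extractor already has its single track selected.
    private void copyTrack(MediaExtractor extractor, MediaMuxer muxer, int dstTrack) {
        ByteBuffer buf = ByteBuffer.allocate(1024 * 1024);
        MediaCodec.BufferInfo info = new MediaCodec.BufferInfo();
        while (true) {
            info.offset = 0;
            info.size = extractor.readSampleData(buf, 0);
            if (info.size < 0) {
                break;                                  // this track's own EOS
            }
            info.presentationTimeUs = extractor.getSampleTime();
            info.flags = extractor.getSampleFlags();
            muxer.writeSampleData(dstTrack, buf, info);
            extractor.advance();
        }
    }

    // After muxer.start():
    // copyTrack(videoExtractor, muxer, videoTrack);
    // copyTrack(audioExtractor, muxer, audioTrack);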

Android MediaMuxer readSampleData IllegalStateException

I am using the following code to generate a video. However, in one of every five attempts, readSampleData raises either an IllegalArgumentException or an IllegalStateException. What can I do to stop the exceptions? Can I check the state of the extractor before calling readSampleData? If so, how?
MediaExtractor videoExtractor = new MediaExtractor();
videoExtractor.setDataSource(SOURCE);
Log.d(TAG, "Video Extractor Track Count " + videoExtractor.getTrackCount());

MediaMuxer muxer = new MediaMuxer(outputFile, MediaMuxer.OutputFormat.MUXER_OUTPUT_MPEG_4);

videoExtractor.selectTrack(0);
MediaFormat videoFormat = videoExtractor.getTrackFormat(0);
int videoTrack = muxer.addTrack(videoFormat);
Log.d(TAG, "Video Format " + videoFormat.toString());

boolean videoEnded = false;
long videoTimeStamp = 0;
int frameCount = 0;
int offset = 100;
ByteBuffer videoBuf = ByteBuffer.allocate(MAX_SAMPLE_SIZE);
BufferInfo videoBufferInfo = new BufferInfo();

muxer.start();

while (!videoEnded) {
    videoBufferInfo.offset = offset;
    videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, offset);
    if (videoBufferInfo.size < 0) {
        Log.d(TAG, "video ended " + videoBufferInfo.size);
        videoEnded = true;
        videoBufferInfo.size = 0;
    } else {
        videoTimeStamp = videoBufferInfo.presentationTimeUs = videoExtractor.getSampleTime();
        videoBufferInfo.flags = videoExtractor.getSampleFlags();
        muxer.writeSampleData(videoTrack, videoBuf, videoBufferInfo);
        videoExtractor.advance();
        frameCount++;
        Log.d(TAG, "Frame (" + frameCount + ") Video PresentationTimeUs:" + (videoBufferInfo.presentationTimeUs / 1000000.0f) + " Flags:" + videoBufferInfo.flags + " Size " + videoBufferInfo.size);
    }
}

muxer.stop();
muxer.release();
The actual code adds audio to the video after the video extractor loop.
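There is no public state getter on MediaExtractor to poll before the call, but since API 28 you can ask for the next sample's size up front and grow the buffer before readSampleData has a chance to throw. A minimal sketch (API 28+ only; this is my suggestion, not from the original post):

    // Guard against an undersized buffer instead of catching the exception.
    long needed = videoExtractor.getSampleSize();       // -1 at end of stream
    if (needed > videoBuf.capacity()) {
        videoBuf = ByteBuffer.allocate((int) needed);   // grow before reading
    }
    videoBufferInfo.size = videoExtractor.readSampleData(videoBuf, 0);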
