Android ExoPlayer: play only high quality with HLS

I'd like to know if there is a way to tell ExoPlayer to play only the highest quality of an HLS stream. My problem is that it takes too much time to reach this quality, even on a good network.
If playback could start at this quality instead of the lowest one, that would be great.
Any ideas?

Modify HlsChunkSource.java as shown below so that it picks the highest variant.
HlsChunkSource.java
OLD:
protected int computeDefaultVariantIndex(HlsMasterPlaylist playlist, Variant[] variants,
    BandwidthMeter bandwidthMeter) {
  int defaultVariantIndex = 0;
  int minOriginalVariantIndex = Integer.MAX_VALUE;
  for (int i = 0; i < variants.length; i++) {
    int originalVariantIndex = playlist.variants.indexOf(variants[i]);
    if (originalVariantIndex < minOriginalVariantIndex) {
      minOriginalVariantIndex = originalVariantIndex;
      defaultVariantIndex = i;
    }
  }
  return defaultVariantIndex;
}
Change to:
protected int computeDefaultVariantIndex(HlsMasterPlaylist playlist, Variant[] variants,
    BandwidthMeter bandwidthMeter) {
  int defaultVariantIndex = 0;
  int maxOriginalVariantIndex = Integer.MIN_VALUE;
  for (int i = 0; i < variants.length; i++) {
    int originalVariantIndex = playlist.variants.indexOf(variants[i]);
    // Pick the last variant in playlist order instead of the first one;
    // this assumes the master playlist lists variants from lowest to highest quality.
    if (originalVariantIndex > maxOriginalVariantIndex) {
      maxOriginalVariantIndex = originalVariantIndex;
      defaultVariantIndex = i;
    }
  }
  return defaultVariantIndex;
}
Note that on devices using an Amlogic video codec (mostly set-top boxes), picking the highest variant can cause the video to freeze; Google closed this as a device issue.
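For reference, on ExoPlayer 2.x the same effect can be achieved without patching library source, through the track selector. A minimal sketch, assuming a recent 2.x release where DefaultTrackSelector.ParametersBuilder.setForceHighestSupportedBitrate is available:
DefaultTrackSelector trackSelector = new DefaultTrackSelector(context);
// Pin track selection to the highest supported bitrate instead of
// letting adaptive selection start from a low variant.
trackSelector.setParameters(
        trackSelector.buildUponParameters()
                .setForceHighestSupportedBitrate(true));
SimpleExoPlayer player = new SimpleExoPlayer.Builder(context)
        .setTrackSelector(trackSelector)
        .build();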

Related

Incorrect duration and bitrate in ffmpeg-encoded audio

I am encoding raw audio data on Android using the ffmpeg libraries. The native code reads the audio data from an external device and encodes it into AAC format in an mp4 container. I am finding that the audio data is successfully encoded (I can play it with Groove Music, my default Windows audio player). But the metadata, as reported by ffprobe, has an incorrect duration of 0.05 seconds, even though the recording is actually several seconds long. The bitrate is also reported wrongly, as around 65 kbps, even though I specified 192 kbps.
I've tried recordings of various durations but the result is always similar: the same (very small) duration and bitrate. I've tried various other audio players, such as QuickTime, but they play only the first 0.05 seconds or so of the audio.
I've removed error checking from the following. The actual code checks every call, and no problems are reported.
Initialisation:
void AudioWriter::initialise( const char *filePath )
{
    AVCodecID avCodecID = AVCodecID::AV_CODEC_ID_AAC;
    int bitRate = 192000;
    const char *containerFormat = "mp4";
    int sampleRate = 48000;
    int nChannels = 2;

    mAvCodec = avcodec_find_encoder(avCodecID);
    mAvCodecContext = avcodec_alloc_context3(mAvCodec);
    mAvCodecContext->codec_id = avCodecID;
    mAvCodecContext->codec_type = AVMEDIA_TYPE_AUDIO;
    mAvCodecContext->sample_fmt = AV_SAMPLE_FMT_FLTP;
    mAvCodecContext->bit_rate = bitRate;
    mAvCodecContext->sample_rate = sampleRate;
    mAvCodecContext->channels = nChannels;
    mAvCodecContext->channel_layout = AV_CH_LAYOUT_STEREO;
    avcodec_open2( mAvCodecContext, mAvCodec, nullptr );

    avformat_alloc_output_context2(&mAvFormatContext, nullptr, containerFormat, nullptr);
    mAvFormatContext->audio_codec = mAvCodec;
    mAvFormatContext->audio_codec_id = avCodecID;
    mAvOutputStream = avformat_new_stream(mAvFormatContext, mAvCodec);
    avcodec_parameters_from_context(mAvOutputStream->codecpar, mAvCodecContext);
    if (!(mAvFormatContext->oformat->flags & AVFMT_NOFILE))
    {
        avio_open(&mAvFormatContext->pb, filePath, AVIO_FLAG_WRITE);
    }
    if ( mAvFormatContext->oformat->flags & AVFMT_GLOBALHEADER )
    {
        mAvCodecContext->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    avformat_write_header(mAvFormatContext, NULL);

    mAvAudioFrame = av_frame_alloc();
    mAvAudioFrame->nb_samples = mAvCodecContext->frame_size;
    mAvAudioFrame->format = mAvCodecContext->sample_fmt;
    mAvAudioFrame->channel_layout = mAvCodecContext->channel_layout;
    av_samples_get_buffer_size(NULL, mAvCodecContext->channels, mAvCodecContext->frame_size,
                               mAvCodecContext->sample_fmt, 0);
    av_frame_get_buffer(mAvAudioFrame, 0);
    av_frame_make_writable(mAvAudioFrame);
    mAvPacket = av_packet_alloc();
}
Encoding:
// SoundRecording is a custom class with the raw samples to be encoded
bool AudioWriter::encodeToContainer( SoundRecording *soundRecording )
{
    int frameCount = mAvCodecContext->frame_size;
    int nChannels = mAvCodecContext->channels;
    float *buf = new float[frameCount * nChannels];
    // Planar destination buffers, one per channel, owned by the frame.
    float **samples = reinterpret_cast<float **>( mAvAudioFrame->extended_data );
    while ( soundRecording->hasReadableData() )
    {
        // Populate the frame
        int samplesRead = soundRecording->read( buf, frameCount * nChannels );
        // De-interleave into planar data
        int nFrames = samplesRead / nChannels;
        for ( int i = 0; i < nFrames; ++i )
        {
            for ( int c = 0; c < nChannels; ++c )
            {
                samples[c][i] = buf[nChannels * i + c];
            }
        }
        // Fill a gap at the end with silence (per-plane indices run to frameCount)
        if ( samplesRead < frameCount * nChannels )
        {
            for ( int i = nFrames; i < frameCount; ++i )
            {
                for ( int c = 0; c < nChannels; ++c )
                {
                    samples[c][i] = 0.0f;
                }
            }
        }
        encodeFrame( mAvAudioFrame );
    }
    delete[] buf;
    finish();
    return true;
}
bool AudioWriter::encodeFrame( AVFrame *frame )
{
    // Send the frame for encoding; a null frame flushes the encoder
    if ( frame != nullptr )
    {
        frame->pts = mAudFrameCounter++;
    }
    avcodec_send_frame( mAvCodecContext, frame );
    int ret = 0;
    while ( ret >= 0 )
    {
        ret = avcodec_receive_packet( mAvCodecContext, mAvPacket );
        if ( ret == AVERROR(EAGAIN) || ret == AVERROR_EOF )
        {
            break;
        }
        else if ( ret < 0 )
        {
            return false;
        }
        av_packet_rescale_ts( mAvPacket, mAvCodecContext->time_base, mAvOutputStream->time_base );
        mAvPacket->stream_index = mAvOutputStream->index;
        av_interleaved_write_frame( mAvFormatContext, mAvPacket );
        av_packet_unref( mAvPacket );
    }
    return true;
}
void AudioWriter::finish()
{
    // Flush the encoder by sending a null frame
    encodeFrame( nullptr );
    av_write_trailer( mAvFormatContext );
}
Since the resultant file contains the recorded music, the code that manipulates the audio data seems to be correct (unless I am somehow overwriting other memory).
The inaccurate duration and bitrate suggest that timing information is not being properly managed. I set the pts of the frames using a simple increasing integer. I'm unclear what the code that rescales the timestamp and sets the stream index achieves, and whether it's even necessary: I copied it from supposedly working code, but I've seen other code without it.
Can anyone see what I'm doing wrong?
The timestamps need to be correct. Set the codec context's time_base to 1/sample_rate and increment the frame's pts by 1024 per frame, not by 1: each AAC frame holds 1024 samples, so in this code mAudFrameCounter should advance by mAvCodecContext->frame_size for every encoded frame. The av_packet_rescale_ts call you were unsure about is needed; it converts packet timestamps from the codec time base to the time base the muxer chose for the stream. Note: 1024 is AAC-specific. If you change codecs, you need to change the frame size accordingly.

How to query the Video Capabilities of an Android device?

public void getCodecInfo() {
    int numCodecs = MediaCodecList.getCodecCount();
    for (int i = 0; i < numCodecs; i++) {
        MediaCodecInfo codecInfo = MediaCodecList.getCodecInfoAt(i);
        if (!codecInfo.isEncoder()) {
            continue;
        }
        String[] types = codecInfo.getSupportedTypes();
        for (int j = 0; j < types.length; j++) {
            MediaCodecInfo.CodecCapabilities capabilities = codecInfo.getCapabilitiesForType(types[j]);
            Log.d("CodecCapabilities", new Gson().toJson(capabilities));
            //MediaCodecInfo.VideoCapabilities videoCapabilities = capabilities.getVideoCapabilities();
            //Log.d("videoCapabilities", new Gson().toJson(videoCapabilities));
        }
    }
}
The above gave me the output below. Do the numbers for profile and level tell me anything about the video capabilities?
{"colorFormats":[21,2130708361],"profileLevels":[{"level":2048,"profile":1},{"level":2048,"profile":2},{"level":2048,"profile":8}],"flags":0}
If I uncomment these two lines in the above code, it crashes with this error message:
java.lang.NoSuchMethodError: android.media.MediaCodecInfo$CodecCapabilities.getVideoCapabilities
How can I query an Android device to find out its video capabilities? I'd like to know the maximum video bitrate and video resolution the device can handle.
I guess that you are testing your code on a device running API < 21? If that is the case, the getVideoCapabilities method is only available on devices running Android API 21 or higher.
Anyway, to get the bitrate and the supported width and height ranges (also API >= 21, which may well be why they arrived together with getVideoCapabilities), you can use:
Range<Integer> bitrateRange = videoCapabilities.getBitrateRange();
Range<Integer> heightRange = videoCapabilities.getSupportedHeights();
Range<Integer> widthRange = videoCapabilities.getSupportedWidths();
You can take a look at this gist, which I published a few days ago, to get all capabilities for a given codec (colors, profiles and levels are printed by name instead of by number, which can be very helpful): TestMediaCodecInfoAsyncTask.java
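As for the numbers in the question's output: they are MediaCodecInfo.CodecProfileLevel constants. For an H.264/AVC codec, profiles 1, 2 and 8 correspond to AVCProfileBaseline, AVCProfileMain and AVCProfileHigh, and level 2048 (0x800) corresponds to AVCLevel4. As a minimal sketch, the crashing call from the question can be guarded like this (reusing the capabilities variable from the question's loop):
// Only call getVideoCapabilities() on API 21+, where it exists.
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP) {
    MediaCodecInfo.VideoCapabilities videoCapabilities = capabilities.getVideoCapabilities();
    if (videoCapabilities != null) { // null for audio-only codec types
        Log.d("videoCapabilities", "bitrate=" + videoCapabilities.getBitrateRange()
                + " widths=" + videoCapabilities.getSupportedWidths()
                + " heights=" + videoCapabilities.getSupportedHeights());
    }
}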

How to set the audio Virtualizer properly

I am trying to make an Android equalizer, but I am not able to use the Virtualizer properly (the Virtualizer audio effect, not the Visualizer). This is a piece of my code; the app is not crashing, but the effect is just not working:
vr = new Virtualizer(0, mediaPlayer.getAudioSessionId());
virtal.setOnSeekBarChangeListener(new SeekBar.OnSeekBarChangeListener() {
    @Override
    public void onProgressChanged(SeekBar seekBar, int i, boolean b) {
        if (seekBar == virtal) {
            vr.setEnabled(level > 0);
            vr.setStrength((short) i); // Already in the right range 0-1000
        } else if (eq != null) {
            int new_level = min_level + (max_level - min_level) * level / 100;
            for (int j = 0; j < num_sliders; j++) {
                if (sliders[j] == seekBar) {
                    eq.setBandLevel((short) j, (short) new_level);
                    break;
                }
            }
        }
    }
    // onStartTrackingTouch / onStopTrackingTouch omitted
});
I am able to use the Equalizer and BassBoost classes, but I don't know why this one is not showing any effect on the audio. Any help or guidance would be appreciated. I followed some projects such as the James music player, but I don't know what the issue is.
You forgot to force the virtualization mode. You should usually check whether the audio file and device support it, but this will provide some results regardless:
int mode = Virtualizer.VIRTUALIZATION_MODE_TRANSAURAL; // Or BINAURAL
vr.setEnabled(true);
vr.forceVirtualizationMode(mode);
Full documentation of modes here: https://developer.android.com/reference/android/media/audiofx/Virtualizer#forceVirtualizationMode(int)
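A minimal sketch of that support check (my addition, assuming API 21+ and stereo content; not part of the original answer):
// Check device/content support before forcing transaural virtualization.
if (vr.getStrengthSupported()
        && vr.canVirtualize(AudioFormat.CHANNEL_OUT_STEREO,
                Virtualizer.VIRTUALIZATION_MODE_TRANSAURAL)) {
    vr.forceVirtualizationMode(Virtualizer.VIRTUALIZATION_MODE_TRANSAURAL);
    vr.setEnabled(true);
}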

How can I know whether there is a beat right now in the currently playing mp3 music on Android

I'm trying to develop an Android app that does something whenever the currently playing music is drumming or beating. So I first have to analyse the current music, and then decide whether it is beating right now.
I have tested the Android API Demos: using the MediaPlayer and Visualizer classes I can get the raw byte data, but how can I tell from it whether there is a beat at the moment?
I'm new here, sorry if I haven't described this clearly; any answer is appreciated!
Here is the part of my code that refreshes the Android view:
public void updateVisualizer(byte[] fft)
{
    if (mFirst)
    {
        mInfoView.setText(mInfoView.getText().toString() + "\nCaptureSize: " + fft.length);
        mFirst = false;
    }
    byte[] model = new byte[fft.length / 2 + 1];
    model[0] = (byte) Math.abs(fft[0]);
    for (int i = 2, j = 1; j < mSpectrumNum;)
    {
        model[j] = (byte) Math.hypot(fft[i], fft[i + 1]);
        i += 2;
        j++;
    }
    mBytes = model;
    // I want to decide whether it is beating now
    /*if (beating) {
        doSomeThing();
    }*/
    invalidate();
}
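A common starting point (offered as a sketch on my part, not a solution from this thread) is a simple energy threshold: flag a beat when the low-frequency energy of the current capture jumps well above its recent average. A hypothetical helper that could be fed the model array computed above:
private final float[] mEnergyHistory = new float[43]; // roughly 1 s of captures
private int mEnergyPos = 0;

private boolean isBeating(byte[] model) {
    // Sum the energy of the lowest bins, where kick and snare dominate.
    float energy = 0f;
    int lowBins = Math.min(8, model.length);
    for (int k = 0; k < lowBins; k++) {
        float m = model[k] & 0xFF;
        energy += m * m;
    }
    // Compare against the rolling average of recent captures.
    float average = 0f;
    for (float e : mEnergyHistory) {
        average += e;
    }
    average /= mEnergyHistory.length;
    mEnergyHistory[mEnergyPos] = energy;
    mEnergyPos = (mEnergyPos + 1) % mEnergyHistory.length;
    return average > 0f && energy > 1.5f * average; // 1.5 is a tunable threshold
}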

How to integrate audio with video in Android using JavaCV/OpenCV?

For my application I created a video from a set of images using JavaCV/OpenCV on Android, but that video plays without sound. I want to add my recorded audio (an mp3 file) to the generated video. How can I achieve that?
This is the code I use to build the video from the images:
String path = SCREENSHOT_FOLDER2;
File folder = new File(path);
listOfFiles = folder.listFiles();
if (listOfFiles.length > 0)
{
    iplimage = new opencv_core.IplImage[listOfFiles.length];
    for (int j = 0; j < listOfFiles.length; j++) {
        String files = "";
        if (listOfFiles[j].isFile())
        {
            files = listOfFiles[j].getName();
        }
        String[] tokens = files.split("\\.(?=[^\\.]+$)");
        String name = tokens[0];
        System.out.println(" j " + name);
        iplimage[j] = cvLoadImage("/mnt/sdcard/images/" + name + ".png");
    }
}
File videopath = new File(SCREENSHOT_FOLDER3);
videopath.mkdirs();
FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(
        SCREENSHOT_FOLDER3 + "video" + System.currentTimeMillis() + ".mp4", 320, 480);
try {
    recorder.setCodecID(CODEC_ID_MPEG4); // or CODEC_ID_MPEG1VIDEO
    recorder.setBitrate(sampleVideoBitRate);
    recorder.setFrameRate(10);
    recorder.setPixelFormat(PIX_FMT_YUV420P);
    recorder.start();
    int x = 0;
    int y = 0;
    for (int i = 0; i < 300 && x < iplimage.length; i++)
    {
        recorder.record(iplimage[x]);
        if (i > (y + 10)) {
            y = y + 1;
            x++;
        }
    }
    recorder.stop();
}
catch (Exception e) {
    e.printStackTrace();
}
Now, how do I integrate the audio (.mp3) file into this code?
OpenCV, and subsequently JavaCV, has no support for audio.
You have to go with a different library for it. Look at the Android support for video/audio, third-party libs, or any other way you may find useful.
But don't just expect OpenCV to help you, because its support for audio is 0%.
I would not say that JavaCV has no support for audio, as it integrates a lot of libraries that OpenCV does not, ffmpeg for example. Check this long thread for that issue.
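For example, a rough sketch of that ffmpeg route (my assumption, using the newer JavaCV FFmpegFrameGrabber/FFmpegFrameRecorder API rather than this thread's old one; outputPath stands in for the mp4 path built above, and exception handling is omitted):
// Decode the mp3 and feed its audio frames to a recorder created with
// matching audio channels and sample rate.
FFmpegFrameGrabber audioGrabber = new FFmpegFrameGrabber("/mnt/sdcard/audio.mp3");
audioGrabber.start();
FFmpegFrameRecorder recorder = new FFmpegFrameRecorder(outputPath, 320, 480,
        audioGrabber.getAudioChannels());
recorder.setSampleRate(audioGrabber.getSampleRate());
recorder.setFrameRate(10);
recorder.start();
// ... record the video frames as in the question ...
Frame audioFrame;
while ((audioFrame = audioGrabber.grabFrame()) != null) {
    recorder.record(audioFrame); // audio frames carry samples, not images
}
audioGrabber.stop();
recorder.stop();
A real implementation would interleave audio and video frames by timestamp rather than appending all of the audio after the video.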
