I'm trying to play multi-language audio tracks on Chromecast. It plays the video/audio files, but it does not switch from English to Spanish. I can switch languages using the same file in a Silverlight player. I've set my Chromecast player language and my Android phone language to Spanish, and in the Google Cast SDK Developer Console I added Spanish as an alternate track.
I've added my custom player using the code from the Google guides; however, the code in the guides does not appear to work from a straight copy and paste. Can someone tell me why protocol.getStreamCount() is 0? Is the timing of the call correct? Here's the entire player I'm using; pay particular attention to window.changeLanguage = function()... and the changeLanguage call. Any help is greatly appreciated!
<html>
<head>
<title>Test Player</title>
<script type="text/javascript" src="//www.gstatic.com/cast/sdk/libs/receiver/2.0.0/cast_receiver.js"></script>
<script type="text/javascript" src="//www.gstatic.com/cast/sdk/libs/mediaplayer/1.0.0/media_player.js"></script>
<link rel="stylesheet" type="text/css" href="chromecast.css" />
</head>
<body>
<video id='vid'></video>
<script type="text/javascript">
if (window.location.href.indexOf('Debug=true') != -1) {
    cast.receiver.logger.setLevelValue(cast.receiver.LoggerLevel.DEBUG);
    cast.player.api.setLoggerLevel(cast.player.api.LoggerLevel.DEBUG);
}
var mediaElement = document.getElementById('vid');
var protocol = null;
// Create the media manager. This will handle all media messages by default.
window.mediaManager = new cast.receiver.MediaManager(mediaElement);
window.defaultOnLoad = mediaManager.onLoad;
mediaManager.onLoad = function (event) {
    if (window.player !== null) {
        player.unload(); // Must unload before starting again.
        window.player = null;
    }
    if (event.data['media'] && event.data['media']['contentId']) {
        console.log('Starting media application');
        var url = event.data['media']['contentId'];
        window.host = new cast.player.api.Host({'mediaElement': mediaElement, 'url': url});
        var ext = url.substring(url.lastIndexOf('.'), url.length);
        var initStart = event.data['media']['currentTime'] || 0;
        // Note: "|| true" would force autoplay even when the sender sends false.
        var autoplay = (event.data['autoplay'] !== false);
        mediaElement.autoplay = autoplay; // Make sure autoplay gets set.
        if (url.lastIndexOf('.m3u8') >= 0) {
            // HTTP Live Streaming
            protocol = cast.player.api.CreateHlsStreamingProtocol(host);
        } else if (url.lastIndexOf('.mpd') >= 0) {
            // MPEG-DASH
            protocol = cast.player.api.CreateDashStreamingProtocol(host);
        } else if (url.indexOf('.ism/') >= 0) {
            // Smooth Streaming
            protocol = cast.player.api.CreateSmoothStreamingProtocol(host);
        }
        // Override the error handler on Host; it is safe to just provide this method.
        host.onError = function (errorCode) {
            console.log("Fatal Error - " + errorCode);
            if (window.player) {
                window.player.unload();
                window.player = null;
            }
        };
        console.log("we have protocol " + ext);
        if (protocol !== null) {
            console.log("Starting Media Player Library");
            window.player = new cast.player.api.Player(host);
            window.player.load(protocol, initStart);
            changeLanguage(); // Called too early - see the EDIT below.
        } else {
            window.defaultOnLoad(event); // do the default process
        }
    }
};
window.changeLanguage = function () {
    var currentLanguage = null;
    var streamCount = protocol.getStreamCount();
    console.log("streamCount: " + streamCount);
    var streamInfo;
    for (var i = 0; i < streamCount; i++) {
        console.log("isStreamEnabled " + i + "? " + protocol.isStreamEnabled(i));
        if (protocol.isStreamEnabled(i)) {
            streamInfo = protocol.getStreamInfo(i);
            console.log("streamInfo isAudio? " + streamInfo.mimeType.indexOf('audio'));
            if (streamInfo.mimeType.indexOf('audio') === 0) {
                if (streamInfo.language) {
                    console.log("streamInfo.language: " + streamInfo.language);
                    currentLanguage = i;
                    break;
                }
            }
        }
    }
    if (currentLanguage === null) {
        currentLanguage = 0;
    }
    i = currentLanguage + 1;
    console.log("i: " + i);
    console.log("currentLanguage: " + currentLanguage);
    while (i !== currentLanguage) {
        if (i === streamCount) {
            console.log("resetting i to 0");
            i = 0;
        }
        streamInfo = protocol.getStreamInfo(i);
        if (streamInfo.mimeType.indexOf('audio') === 0) {
            protocol.enableStream(i, true);
            protocol.enableStream(currentLanguage, false);
            console.log("Enabling: " + i);
            console.log("Disabling: " + currentLanguage);
            break;
        }
        i++;
    }
    if (i !== currentLanguage) {
        console.log("reloading window player " + window.player);
        window.player.reload();
    }
};
window.player = null;
console.log('Application is ready, starting system');
window.castReceiverManager = cast.receiver.CastReceiverManager.getInstance();
castReceiverManager.start();
</script>
</body>
</html>
Chromecast Console:
Application is ready, starting system
Starting media application
we have protocol .ism/manifest
Starting Media Player Library
streamCount: 0
i: 1
currentLanguage: 0
EDIT
I went into the console and, after the media loaded, ran the following command:
this.protocol.getStreamCount()
2
So this is clearly a timing issue: I need to call changeLanguage once the stream has fully loaded. I'm investigating what the appropriate time to call it is. I'd appreciate any help, but I'll try to Google the answer now that I'm positive it's a timing issue.
The answer is:
host.onManifestReady = function() {
changeLanguage();
};
This will provide the correct timing for when to pull the alternate audio tracks.
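For context, here's a minimal sketch of how that handler slots into the onLoad flow above (same host, protocol, initStart, and changeLanguage as in the listing; assigning the callback before load() is my assumption about the safest ordering):
window.player = new cast.player.api.Player(host);
// The stream list is only populated once the manifest has been parsed,
// so defer the track switch until the host signals the manifest is ready.
host.onManifestReady = function () {
    changeLanguage();
};
window.player.load(protocol, initStart);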
I came across several questions on this subject. I'm trying to select the rear camera on an Android device running Chrome.
So, after some reading:
var selector = document.getElementById('video-source-selector');
navigator.mediaDevices.enumerateDevices()
    .then(function (devices) {
        var videoDevices = devices.map(function (item) {
            if (item.kind === 'videoinput') {
                return item;
            }
        }).filter(function (element) {
            return element !== undefined;
        });
        var max = videoDevices.length;
        videoDevices.forEach(function (device, i) {
            var html = '';
            var div = document.createElement('div');
            if (i === max - 1) { // last element reached
                html += '<option value="' + device.deviceId + '" selected>' + device.label + '</option>';
            } else {
                html += '<option value="' + device.deviceId + '">' + device.label + '</option>';
            }
            div.innerHTML = html;
            selector.appendChild(div.childNodes[0]);
            console.log(device.kind + ": " + device.label + " id = " + device.deviceId);
        });
    })
    .catch(function (err) {
        console.log(err.name + ": " + err.message);
    });
selector.addEventListener("change", function () {
    console.log(selector.value); // Works as expected: returns the ID of the selected device
});
Then, as I'm using Three.js in this app, I'm binding this ID to Jerome Etienne's three.js extension WebcamGrabbing (https://github.com/jeromeetienne/threex.webar):
var videoGrabbing = new THREEx.WebcamGrabbing(selector.value);
Then I had to modify the THREEx.WebcamGrabbing class this way (I removed the irrelevant parts):
THREEx.WebcamGrabbing = function (sourceDeviceId) {
    ...
    console.log('webcamgrabbing : ', sourceDeviceId); // returns the expected ID
    var constraints = {
        video: {
            optional: [{
                sourceId: sourceDeviceId
            }]
        }
    };
    // try to get user media
    navigator.getUserMedia(constraints, function (stream) {
        domElement.src = URL.createObjectURL(stream);
    }, function (error) {
        console.error("Can't getUserMedia() due to ", error);
    });
    ...
}
But still, Chrome on Android keeps giving me the stream from the front camera, whatever device I select...
What am I missing?
EDIT: Based on this topic (GetUserMedia - facingmode), I came up with some logs to see what's happening here:
var constraints = {
    audio: false,
    video: { facingMode: { exact: "environment" } }
};
console.log('Try to get stream with constraints:', constraints);
navigator.getUserMedia(constraints, function (stream) {
    var videoTracks = stream.getVideoTracks();
    console.log('Got stream with constraints:', constraints); // Ok
    console.log('Using video device: ' + videoTracks[0].label); // > Using video device: camera 0, facing back
    for (var i = 0; i < videoTracks.length; i++) {
        console.log('Found video device with constraints: ', videoTracks[i].label); // Found video device with constraints: camera 0, facing back
    }
    domElement.src = URL.createObjectURL(stream);
}, function (error) {
    console.error("Can't getUserMedia() due to ", error);
});
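If the legacy optional/sourceId constraint is being silently ignored (older Chrome builds treated optional constraints as hints rather than requirements), a sketch of the modern way to pin a specific camera is the deviceId constraint with exact; here selector.value and domElement are assumed to be the selector and video element from the code above:
// Hedged sketch: 'exact' makes the device ID mandatory instead of a hint.
navigator.mediaDevices.getUserMedia({
    video: { deviceId: { exact: selector.value } }
}).then(function (stream) {
    domElement.srcObject = stream; // srcObject supersedes createObjectURL(stream)
}).catch(function (error) {
    console.error("getUserMedia failed: ", error);
});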
An alternative way to select the back camera on Chrome is to use the enumerateDevices method.
First, get all of the video input IDs:
var videoInputs = [];
navigator.mediaDevices.enumerateDevices().then(function (devices) {
    devices.forEach(function (device) {
        if (device.kind == "videoinput") {
            videoInputs.push(device.deviceId); // If the device is a video input, add its ID to the array.
        }
    });
});
Then the first element of the array will contain the ID of the front camera, and the second element will contain the ID of the back camera.
Finally, pass the ID of the camera that you want to use:
navigator.getUserMedia({ audio: false, video: { sourceId: videoInputs[1] } }, successCallback, errorCallback);
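Putting both steps together, a minimal sketch (a <video id="video"> element is assumed, and the "second input is the back camera" rule from this answer is a heuristic that may not hold on every device):
navigator.mediaDevices.enumerateDevices().then(function (devices) {
    var ids = devices
        .filter(function (d) { return d.kind === "videoinput"; })
        .map(function (d) { return d.deviceId; });
    return navigator.mediaDevices.getUserMedia({
        audio: false,
        video: { deviceId: ids[1] } // second video input: usually the back camera
    });
}).then(function (stream) {
    document.getElementById('video').srcObject = stream;
});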
These days I am trying to run existing code from Adobe's Technical Evangelist Andrew Trice. The concept is to connect a Database.com DB to a PhoneGap application. Here is the tutorial with the source code:
http://wiki.developerforce.com/page/Building_PhoneGap_Mobile_Applications_Powered_by_Database.com
The issue:
After successful installation of ChildBrowser, there is a specific error in the code:
Uncaught TypeError: cannot read property "ChildBrowser" of undefined at file:///android_asset/www/js/salesforceWrapper.js:17
Here is the full code (I put a comment on the line with the error):
function SalesforceWrapper() {
    /* AUTHENTICATION PARAMETERS */
    this.loginUrl = 'https://login.salesforce.com/';
    this.clientId = '3MVG99qusVZJwhsmjZIlEaUsFRnadOib8Kv_MPwooFMEi.XpChrZ5cVEcKU_7NR1zfQjjmdHI7wMARXnLlgku';
    this.redirectUri = 'https://login.salesforce.com/services/oauth2/success';
    /* CLASS VARIABLES */
    this.cb = undefined;     // ChildBrowser in PhoneGap
    this.client = undefined; // forceTk client instance
    this.init();
}
SalesforceWrapper.prototype.init = function () {
    this.client = new forcetk.Client(this.clientId, this.loginUrl);
    // line 17, the line with the error
    this.cb = window.plugins.childBrowser;
}
SalesforceWrapper.prototype.login = function (successCallback) {
    this.loginSuccess = successCallback;
    var self = this;
    self.cb.onLocationChange = function (loc) {
        if (loc.search(self.redirectUri) >= 0) {
            self.cb.close();
            self.sessionCallback(unescape(loc));
        }
    };
    self.cb.showWebPage(self.getAuthorizeUrl(self.loginUrl, self.clientId, self.redirectUri));
}
SalesforceWrapper.prototype.getAuthorizeUrl = function (loginUrl, clientId, redirectUri) {
    return loginUrl + 'services/oauth2/authorize?display=touch' + '&response_type=token&client_id=' + escape(clientId) + '&redirect_uri=' + escape(redirectUri);
}
SalesforceWrapper.prototype.sessionCallback = function (loc) {
    var oauthResponse = {};
    var fragment = loc.split("#")[1];
    if (fragment) {
        var nvps = fragment.split('&');
        for (var nvp in nvps) {
            var parts = nvps[nvp].split('=');
            oauthResponse[parts[0]] = unescape(parts[1]);
        }
    }
    if (typeof oauthResponse === 'undefined' || typeof oauthResponse['access_token'] === 'undefined') {
        console.log("error");
    } else {
        this.client.setSessionToken(oauthResponse.access_token, null, oauthResponse.instance_url);
        if (this.loginSuccess) {
            this.loginSuccess();
        }
    }
    this.loginSuccess = undefined;
}
Adobe's PhoneGap is a powerful tool, but it is still tough to work with.
Thank you in advance for any proposals. At this point I don't expect to get the answer; I know that there is a big distance...
Well people, the problem was caused by the index.html, in which the reference to ChildBrowser.js pointed to a different folder.
It was:
<script type="text/javascript" src="js/libs/ChildBrowser.js"></script>
instead of:
<script type="text/javascript" src="js/ChildBrowser.js"></script>
So people, be careful!
Now my problem has to do with another function, but I think it is something similar.
Thank you all!
Please try to answer if you know something.
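Since the root cause was the plugin script never loading, a small defensive check in init() would turn this crash into a readable log line; a minimal sketch (the guard is my addition, not part of the tutorial code):
SalesforceWrapper.prototype.init = function () {
    this.client = new forcetk.Client(this.clientId, this.loginUrl);
    // window.plugins is undefined when ChildBrowser.js fails to load.
    if (window.plugins && window.plugins.childBrowser) {
        this.cb = window.plugins.childBrowser;
    } else {
        console.log('ChildBrowser plugin missing - check the <script> path in index.html');
    }
}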
When I use the HTML5 getUserMedia API to access a camera on an Android (4.0) phone, it opens the front camera, but I want to open the back camera. Sample code:
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<meta name="viewport" content="width=device-width">
<title>HTML5 Mobile Camera</title>
<script src="js/jquery.js"></script>
<script>
$(document).ready(init);
function init() {
    try {
        window.URL = window.URL || window.webkitURL || window.msURL || window.oURL;
        navigator.getUserMedia = navigator.getUserMedia
            || navigator.webkitGetUserMedia
            || navigator.mozGetUserMedia
            || navigator.msGetUserMedia;
        navigator.getUserMedia({ video: true }, successCallback, errorCallback);
    } catch (err) {
        // Try it with the old spec string syntax
        navigator.getUserMedia('video', successCallback, errorCallback);
    }
    $(":button").click(function () {
        slap();
    });
}
function slap() {
    var video = $("#myVideo")[0];
    var canvas = capture(video);
    $("#result").empty();
    $("#result").append(canvas);
    //alert();
    var imgData = canvas.toDataURL('image/png;base64,');
    //var imgData = canvas.toDataURL("image/png");
    imgData = imgData.substring(22);
    //blb = dataURItoBlob(imgData);
    //sendMsg(blb);
}
function errorCallback(err) {
}
function successCallback(stream) {
    $("#myVideo").attr("src", window.webkitURL.createObjectURL(stream));
}
function capture(video) {
    var canvas = document.createElement('canvas');
    var width = video.videoWidth;
    var height = video.videoHeight;
    canvas.width = video.videoWidth;
    canvas.height = video.videoHeight;
    var context = canvas.getContext('2d');
    context.drawImage(video, 0, 0, 160, 120);
    return canvas;
}
</script>
</head>
<body>
<video id="myVideo" autoplay="autoplay"></video>
<br> <input type="button" value="capture" />
<br><div id="result" style="width: 145px"></div>
<div>
<p id="resultMsg" style="color: red"></p>
<p id="decodeTime" style="color: green"></p>
</div>
</body>
</html>
I don't know how to access a specific camera on an Android phone. Does anyone know? Thanks.
There is now the ability to specify a camera in the latest specification with the facingMode property: http://www.w3.org/TR/mediacapture-streams/#idl-def-VideoFacingModeEnum
This property is an optional part of the MediaStreamConstraints object that is the first argument of the getUserMedia method.
Here's a simplified example from the spec:
var supports = navigator.mediaDevices.getSupportedConstraints();
if (!supports["facingMode"]) {
    // Handle lack of browser support if necessary
}
var gotten = navigator.mediaDevices.getUserMedia({
    video: {
        facingMode: { exact: "environment" }
    }
});
The value environment means the back camera of the device. Other values are user, left and right.
Note that support for this varies depending on the browser/browser version.
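The spec example stops at obtaining the promise; a short sketch of actually attaching the result to a video element (the #video id is an assumption):
navigator.mediaDevices.getUserMedia({ video: { facingMode: { exact: "environment" } } })
    .then(function (stream) {
        document.getElementById('video').srcObject = stream;
    })
    .catch(function (err) {
        // An 'exact' constraint rejects (OverconstrainedError) when no camera matches.
        console.error(err.name + ": " + err.message);
    });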
See function gotSources(sourceInfos) in the code below
<!--
Based on Motion Detector Demo Created by Ákos Nikházy.
If you use this app please link this demo http://motion-detector.nikhazy-dizajn.hu/
-->
<!DOCTYPE html>
<html>
<head>
<meta charset=utf-8 />
<title>Frame capture demo</title>
</head>
<body>
<header>
<h1>Motion Detection</h1>
<h4>with HTML5 API using .getUserMedia()</h4>
</header>
<video autoplay></video>
<hr>
<canvas id="savePhoto"></canvas>
<script>
function hasGetUserMedia() {
    // returns true if supported
    return !!(navigator.getUserMedia || navigator.webkitGetUserMedia
        || navigator.mozGetUserMedia || navigator.msGetUserMedia);
}
function onSuccess(stream) {
    // If we can stream from the camera.
    var source;
    // Get the stream. This goes to the video tag.
    if (window.URL) {
        source = window.URL.createObjectURL(stream);
    } else if (window.webkitURL) {
        source = window.webkitURL.createObjectURL(stream);
    } else {
        source = stream; // Opera and Firefox
    }
    // Set up the video tag
    video.autoplay = true;
    video.src = source;
    // We try to find motion every X milliseconds
    setInterval(function () {
        motionDetector();
    }, sampling);
}
function onError() {
    // if we fail (not supported, no camera etc.)
    alert('No stream, no win. Refresh.');
}
function saveImage(canvasToSave) {
    // create an image from the canvas
    dataUrl = canvasToSave.toDataURL();
    imageFound = document.createElement('img');
    imageFound.src = dataUrl;
    document.body.appendChild(imageFound);
}
function motionDetector() {
    ctxSave.drawImage(video, 0, 0, savePhoto.width, savePhoto.height);
}
/* After all those functions, let's start setting up the program */
// Set up elements. Should be an init() but I don't care right now.
var video = document.querySelector('video'); // the video tag
var savePhoto = document.getElementById('savePhoto'); // the possible saved image's canvas
var ctxSave = savePhoto.getContext('2d'); // the latest image from video in full size and color
var sampling = 1000; // how much time is needed between samples, in milliseconds
var videoSourceInfo = null;
// We need this so we can use videoWidth and videoHeight; we also set up the canvas sizes here, after we have video data.
video.addEventListener("loadedmetadata", function () {
    console.log(video.videoWidth + ":" + video.videoHeight);
    savePhoto.width = video.videoWidth;
    savePhoto.height = video.videoHeight;
});
function start() { // Start the whole magic
    if (hasGetUserMedia()) {
        // is it working?
        navigator.getUserMedia
            || (navigator.getUserMedia = navigator.mozGetUserMedia
                || navigator.webkitGetUserMedia
                || navigator.msGetUserMedia);
        var videoSourceInfoId = videoSourceInfo.id;
        var constraints = {
            video: {
                optional: [{ sourceId: videoSourceInfoId }]
            },
            toString: function () {
                return "video";
            }
        };
        navigator.getUserMedia(constraints, onSuccess, onError);
    } else {
        // no support
        alert('getUserMedia() is not supported in your browser. Try Chrome.');
    }
}
function gotSources(sourceInfos) {
    for (var i = sourceInfos.length - 1; i >= 0; i--) { // take the last camera in the list (supposedly the back camera)
        var sourceInfo = sourceInfos[i];
        if (sourceInfo.kind === 'video') {
            videoSourceInfo = sourceInfo;
            console.log('SourceId: ', videoSourceInfo.id);
            start();
            break;
        } else {
            console.log('Some other kind of source: ', sourceInfo);
        }
    }
}
if (typeof MediaStreamTrack === 'undefined') {
    alert('This browser does not support MediaStreamTrack.\n\nTry Chrome Canary.');
} else {
    MediaStreamTrack.getSources(gotSources); // async task
}
</script>
</body>
</html>
Hi, I think this will work for you:
<script>
var gum = mode =>
    navigator.mediaDevices.getUserMedia({ video: { facingMode: { exact: mode } } })
        .then(stream => (video.srcObject = stream))
        .catch(e => log(e));
var stop = () => video.srcObject && video.srcObject.getTracks().forEach(t => t.stop());
var log = msg => div.innerHTML += msg + "<br>";
</script>
<button onclick="stop();gum('user')">Front</button>
<button onclick="stop();gum('environment')">Back</button>
<div id="div"></div><br>
<video id="video" height="320" autoplay></video>
<script src="https://webrtc.github.io/adapter/adapter-latest.js"></script>
The key here is the facingMode constraint; see:
https://github.com/webrtcHacks/adapter/issues/820
https://developer.mozilla.org/en-US/docs/Web/API/MediaTrackConstraints/facingMode
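Since an exact facingMode rejects outright on devices without a matching camera, one possible pattern (my sketch, reusing the video element from the snippet above) is to fall back to the default camera:
function gumBackWithFallback() {
    // Try the back camera first; fall back to any camera if the exact match is rejected.
    return navigator.mediaDevices.getUserMedia({ video: { facingMode: { exact: "environment" } } })
        .catch(function () {
            return navigator.mediaDevices.getUserMedia({ video: true });
        })
        .then(function (stream) {
            video.srcObject = stream;
        });
}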
I'm trying to record an audio file using the Media object in PhoneGap and then play it. I have no problem recording, but when I try to play the recording, my app shuts down, both on my emulator and on my device. Logcat says: "null pointer exception at org.apache.cordova.readyPlayer (AudioPlayer.java)". Many thanks to anyone who is willing to help me; it took me a while to realize I'm completely lost here.
Here is the code, which I copied from http://docs.phonegap.com/en/2.4.0/cordova_media_media.md.html#media.stopRecord and added .play() after the .stopRecord(). I can add the manifest and activity if needed; I doubt the problem is there, but I could be wrong.
index.html:
<script type="text/javascript" charset="utf-8" src="cordova-2.4.0.js"></script>
<script type="text/javascript" charset="utf-8">
// Wait for Cordova to load
//
document.addEventListener("deviceready", onDeviceReady, false);
// Record audio
//
function recordAudio() {
    var src = "myrecording.amr";
    var mediaRec = new Media(src, onSuccess, onError);
    // Record audio
    mediaRec.startRecord();
    // Stop recording after 10 sec
    var recTime = 0;
    var recInterval = setInterval(function () {
        recTime = recTime + 1;
        setAudioPosition(recTime + " sec");
        if (recTime >= 10) {
            clearInterval(recInterval);
            mediaRec.stopRecord();
            document.getElementById('audio_position').innerHTML = "finished";
            mediaRec.play(); // <-- the line I added; this is where the app crashes
        }
    }, 1000);
}
// Cordova is ready
//
function onDeviceReady() {
    recordAudio();
}
// onSuccess Callback
//
function onSuccess() {
    console.log("recordAudio(): Audio Success");
}
// onError Callback
//
function onError(error) {
    alert('code: ' + error.code + '\n' +
          'message: ' + error.message + '\n');
}
// Set audio position
//
function setAudioPosition(position) {
    document.getElementById('audio_position').innerHTML = position;
}
</script>
<p id="media">Recording audio...</p>
<p id="audio_position"></p>
yuval
I found the same issue with Cordova 3.5 (with Media plugin 0.2.11) and Android 4.4. The solution is as described above. Basically something like this should do the trick:
mediaRec.stopRecord();
var src = mediaRec.src; // if you don't have src defined anymore
mediaRec.release();
var mediaPlay = new Media(src, onSuccess, onError);
mediaPlay.play();
There is no need to create another Media object to play the previously recorded file. Just do the following; you have to release the media object after recording has stopped:
// variable in global scope
var mediaRec = 0;
// function to record audio
function recordAudio() {
    // create a media object for your recording
    mediaRec = new Media("phonegap_rec.amr", onMediaRecordSuccess, onMediaRecordError);
    // start recording
    mediaRec.startRecord();
    // record for 5 seconds
    var recTime = 0;
    var recInterval = setInterval(function () {
        recTime = recTime + 1;
        if (recTime >= 5) {
            clearInterval(recInterval);
            // stop recording after 5 seconds
            mediaRec.stopRecord();
            // release the media object (THIS IS THE KEY SOLUTION HERE)
            mediaRec.release();
        }
    }, 1000);
}
// function to play back the recorded audio
function playRecord() {
    // check if the object exists
    if (mediaRec) {
        // now you can play back the recording
        mediaRec.play();
    }
}
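For completeness, a minimal way to drive these two functions from the page (hypothetical markup; the deviceready handling from the question is assumed):
<button onclick="recordAudio()">Record (5 s)</button>
<button onclick="playRecord()">Play</button>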
var style1 = document.createElement("link");
style1.id = "rel";
style1.rel = "stylesheet";
style1.href = "http://www.mysite.com/css.css";
style1.onload = function(){document.body.innerHTML+="fffffff";};
document.getElementsByTagName("head")[0].appendChild(style1);
This code works in Chrome/Firefox, and yet the stock browsers on my Android 2.3 and 4.1 devices print nothing. What's the problem? I'd like to be able to execute some JS when a link element loads; anything else would, in my case, amount to a hack. :/
The problem isn't innerHTML. Try it with alerts if you want (at your own peril).
Another answer mentions checking for this functionality by doing
var huh = 'onload' in document.createElement('link');
...but this is true in both stock browsers! What gives?
The Android stock browser doesn't support the "onload" / "onreadystatechange" events for the <link> element: http://pieisgood.org/test/script-link-events/
But it returns:
"onload" in link === true
So, my solution is to detect the Android browser from the userAgent and then wait for some special CSS rule from your stylesheet (e.g., a reset of the "body" margins).
If it's not the Android browser and it supports the "onload" event, we will use it:
var userAgent = navigator.userAgent,
    iChromeBrowser = /CriOS|Chrome/.test(userAgent),
    isAndroidBrowser = /Mozilla\/5.0/.test(userAgent) && /Android/.test(userAgent) && /AppleWebKit/.test(userAgent) && !iChromeBrowser;
addCssLink('PATH/NAME.css', function () {
    console.log('css is loaded');
});
function addCssLink(href, onload) {
    var css = document.createElement("link");
    css.setAttribute("rel", "stylesheet");
    css.setAttribute("type", "text/css");
    css.setAttribute("href", href);
    document.head.appendChild(css);
    if (onload) {
        if (isAndroidBrowser || !("onload" in css)) {
            waitForCss({
                success: onload
            });
        } else {
            css.onload = onload;
        }
    }
}
// We check for the css reset of the "body" element; once it applies, the css is loaded.
function waitForCss(params) {
    var maxWaitTime = 1000,
        stepTime = 50,
        alreadyWaitedTime = 0;
    function nextStep() {
        var startTime = +new Date(),
            endTime;
        setTimeout(function () {
            endTime = +new Date();
            alreadyWaitedTime += (endTime - startTime);
            if (alreadyWaitedTime >= maxWaitTime) {
                params.fail && params.fail();
            } else {
                // check for the style; if it has not applied yet, run the timer again
                if (window.getComputedStyle(document.body).marginTop === '0px') {
                    params.success();
                } else {
                    nextStep();
                }
            }
        }, stepTime);
    }
    nextStep();
}
Demo: http://codepen.io/malyw/pen/AuCtH