Keep TensorFlow session open in a Kivy app - Android

I am trying to run a Kivy app alongside a TensorFlow session and keep the session from reloading the model every time I make a prediction. To be more precise, I want to know how I can call a function defined inside the session's with block from outside it.
Here is the code for the session:
def decode():
    # Only allocate part of the gpu memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1
        enc_vocab_path = os.path.join(gConfig['working_directory'], "vocab%d.enc" % gConfig['enc_vocab_size'])
        dec_vocab_path = os.path.join(gConfig['working_directory'], "vocab%d.dec" % gConfig['dec_vocab_size'])
        enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)
        _, rev_dec_vocab = data_utils.initialize_vocabulary(dec_vocab_path)

        # !!! This is the function that I'm trying to call. !!!
        def answersqs(sentence):
            token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), enc_vocab)
            bucket_id = min([b for b in xrange(len(_buckets))
                             if _buckets[b][0] > len(token_ids)])
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                             target_weights, bucket_id, True)
            outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
            return " ".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs])
Here is where I'm calling the function:
def resp(self, msg):
    def p():
        if len(msg) > 0:
            # If I try to do decode().answersqs(msg), it starts a new session.
            ansr = answersqs(msg)
            ansrbox = Message()
            ansrbox.ids.mlab.text = str(ansr)
            ansrbox.ids.mlab.color = (1, 1, 1)
            ansrbox.pos_hint = {'x': 0}
            ansrbox.source = './icons/ansr_box.png'
            self.root.ids.chatbox.add_widget(ansrbox)
            self.root.ids.scrlv.scroll_to(ansrbox)
    threading.Thread(target=p).start()
And here is the last part:
if __name__ == "__main__":
    if len(sys.argv) - 1:
        gConfig = brain.get_config(sys.argv[1])
    else:
        # get configuration from seq2seq.ini
        gConfig = brain.get_config()
    threading.Thread(target=decode).start()
    KatApp().run()
Also, should I change the session from GPU to CPU before I port it to Android?

You should have two variables, graph and session, that you keep around.
When you load the model you do something like:
graph = tf.Graph()
session = tf.Session(config=config)
with graph.as_default(), session.as_default():
    # The rest of your model loading code.
When you need to make a prediction:
with graph.as_default(), session.as_default():
    return session.run([your_result_tensor])
This way the session is loaded and stays in memory; the with blocks just tell the system that this is the context where you want to run.
In your code, move def answersqs outside of the with part. It will bind automatically to graph and session from the surrounding function (but you need to make them available outside the with). A minimal sketch of that restructuring follows.
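Something like this (a sketch adapted from the question's code; create_model and the vocabulary loading are assumed to stay exactly as they are above):

graph = tf.Graph()
session = tf.Session(graph=graph, config=config)

with graph.as_default(), session.as_default():
    # Build the model once; this is the expensive part.
    model = create_model(session, True)
    model.batch_size = 1
    # ... load enc_vocab / rev_dec_vocab as in the question ...

def answersqs(sentence):
    # Re-enter the already-created graph and session instead of starting new ones.
    with graph.as_default(), session.as_default():
        # ... token_ids / bucket_id / model.step(...) as in the question,
        # using `session` wherever `sess` was used before ...
        pass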
For the second part: if you follow the guides, the exported model should normally be free of hardware-binding information, and when you load it TensorFlow will figure out a good placement (which might be the GPU if one is available and sufficiently capable).
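If your exported graph does end up carrying device strings, here is a sketch of one way to strip them while folding variables into constants (this assumes the TF 1.x freezing utility; "output" is a hypothetical output node name, not one from the question):

from tensorflow.python.framework import graph_util

with graph.as_default(), session.as_default():
    frozen = graph_util.convert_variables_to_constants(
        session, graph.as_graph_def(), ["output"])  # "output" is hypothetical
for node in frozen.node:
    node.device = ""  # drop any recorded device placement
tf.train.write_graph(frozen, 'models/', 'frozen.pb', as_text=False)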

Related

How do I get the JSON result from Amadeus API (Kotlin Android)

I am trying to use the Amadeus Flight Offers Search API with the following code:
when (val flightOffers = amadeus.shopping.flightOffersSearch.get(
    originLocationCode = "MDZ",
    destinationLocationCode = "MAD",
    departureDate = LocalDate.parse("2020-11-11").toString(),
    adults = 2,
    max = 1
)) {
    is ApiResult.Success -> {
        if (flightOffers.succeeded) {
            println("RESULT SUCCEEDED")
            println(flightOffers.data)
        } else {
            println("RESULT DIDN'T SUCCEEDED")
        }
    }
    is ApiResult.Error -> {
        println("RESULT ERROR")
    }
}
And if I run that, the logcat output is as follows:
I/System.out: RESULT SUCCEEDED
Which makes me think that flightOffers.data is empty.
However if I try this code:
val flightOffers = amadeus.shopping.flightOffersSearch.get(
    originLocationCode = "MDZ",
    destinationLocationCode = "MAD",
    departureDate = LocalDate.parse("2020-11-11").toString(),
    adults = 2,
    max = 1
)
println("AMADEUS: $flightOffers")
println("AMADEUS: $flightOffers")
I get the following output:
I/System.out: AMADEUS: Success(meta=Meta(count=1, links={self=https://test.api.amadeus.com/v2/shopping/flight-offers?originLocationCode=MDZ&destinationLocationCode=MAD&departureDate=2020-11-11&adults=2&max=1}), data=[FlightOfferSearch(type=flight-offer, id=1, source=GDS, instantTicketingRequired=false, nonHomogeneous=false, oneWay=false, lastTicketingDate=2020-05-03, numberOfBookableSeats=7, itineraries=[Itinerary(duration=PT18H, segments=[SearchSegment(departure=AirportInfo(iataCode=MDZ, terminal=null, at=2020-11-11T07:10:00), arrival=AirportInfo(iataCode=AEP, terminal=null, at=2020-11-11T08:45:00), carrierCode=AR, number=1403, aircraft=Aircraft(code=738), duration=PT1H35M, id=1, numberOfStops=0, blacklistedInEU=false, co2Emissions=null), SearchSegment(departure=AirportInfo(iataCode=EZE, terminal=A, at=2020-11-11T13:25:00), arrival=AirportInfo(iataCode=MAD, terminal=1, at=2020-11-12T05:10:00), carrierCode=UX, number=42, aircraft=Aircraft(code=789), duration=PT11H45M, id=2, numberOfStops=0, blacklistedInEU=false, co2Emissions=null)])], price=SearchPrice(currency=EUR, total=1151.26, base=510.0, fees=[Fee(amount=0.0, type=SUPPLIER), Fee(amount=0.0, type=TICKETING)], grandTotal=1151.26), pricingOptions=PricingOptions(includedCheckedBagsOnly=true, fareType=[PUBLISHED], corporateCodes=null, refundableFare=false, noRestrictionFare=false, noPenaltyFare=false), validatingAirlineCodes=[UX], travelerPricings=[TravelerPricing(travelerId=1, fareOption=STANDARD, travelerType=ADULT, price=SearchPrice(currency=EUR, total=575.63, base=255.0, fees=null, grandTotal=0.0), fareDetailsBySegment=[FareDetailsBySegment(segmentId=1, cabin=ECONOMY, fareBasis=ZYYOPO, segmentClass=Q, includedCheckedBags=IncludedCheckedBags(weight=0, weightUnit=null)), FareDetailsBySegment(segmentId=2, cabin=ECONOMY, fareBasis=ZYYOPO, segmentClass=Z, includedCheckedBags=IncludedCheckedBags(weight=0, weightUnit=null))]), TravelerPricing(travelerId=2, fareOption=STANDARD, travelerType=ADULT, price=SearchPrice(currency=EUR, total=575.63, base=255.0, fees=null, grandTotal=0.0), fareDetailsBySegment=[FareDetailsBySegment(segmentId=1, cabin=ECONOMY, fareBasis=ZYYOPO, segmentClass=Q, includedCheckedBags=IncludedCheckedBags(weight=0, weightUnit=null)), FareDetailsBySegment(segmentId=2, cabin=ECONOMY, fareBasis=ZYYOPO, segmentClass=Z, includedCheckedBags=IncludedCheckedBags(weight=0, weightUnit=null))])])], dictionaries={locations={MAD={cityCode=MAD, countryCode=ES}, EZE={cityCode=BUE, countryCode=AR}, MDZ={cityCode=MDZ, countryCode=AR}, AEP={cityCode=BUE, countryCode=AR}}, aircraft={789=BOEING 787-9, 738=BOEING 737-800}, currencies={EUR=EURO}, carriers={AR=AEROLINEAS ARGENTINAS, UX=AIR EUROPA}})
This means the API is returning JSON, but I can't use flightOffers with Gson to map this data to a data class, because flightOffers is an ApiResult wrapper and I don't know how to use it. According to the library docs it should be done the way I did it in the first try.
I appreciate all the help and advice I can get. This is my first Android App.
Nice to see that we have a new Android developer in the community!
First, on Android you should avoid using println; use Log.d/e/w/i instead, which prints your result to the Android logcat.
From what I can see, you successfully set up your project and were able to make queries from the SDK.
In the Android SDK, every get() gives you a proper data object, not just JSON; you don't have to take care of parsing the answer. What you have in flightOffers.data is in fact a List<FlightOfferSearch> that you can use right away!

ARCore 1.2 Unity Create AugmentedImageDatabase on the fly

I am trying to dynamically create an image database using ARCore's new image tracking feature.
Currently I have a server serving me image locations, which I download to the persistent data path of my device. I then use these images to create new database entries, like below:
Public Variables:
public AugmentedImageDatabase newBD;
public AugmentedImageDatabaseEntry newEntry;
Here I do regex matching to get the images from the data path and convert them to Texture2Ds in order to populate the AugmentedImageDatabaseEntry values.
Regex r1 = new Regex(@"https?://s3-([^.]+).amazonaws.com/([^/]+)/([^/]+)/(.*)");
// Match the input for file name
Match match = r1.Match(input);
if (match.Success)
{
    string v = match.Groups[4].Value;
    RegexMatch = v;
    Texture2D laodedTexture = LoadTextureToFile(v);
    laodedTexture.EncodeToPNG();
    AugmentedImageDatabaseEntry newEntry = new AugmentedImageDatabaseEntry(v, laodedTexture, Application.persistentDataPath + "/" + v);
    newEntry.Name = v;
    newEntry.Texture = laodedTexture;
    newEntry.TextureGUID = Application.persistentDataPath + "/" + v;
    Debug.Log(newEntry.Name);
    Debug.Log(newEntry.Texture);
    Debug.Log(newEntry.TextureGUID);
    newBD.Add(newEntry);
}
To get this to work on Android I had to modify the source of ARCore's Unity implementation a little so that the database.Add() function would work outside of the editor.
All of this seems to work seamlessly, as I don't get any errors.
Once I change scenes to the ARCore scene, I instantiate an ARCore camera and create a new ARCoreSessionConfig which holds a reference to the database populated above.
Here is that code:
public class NewConfigSetup : MonoBehaviour {
    public GameObject downloadManager;
    public GameObject arcoreDevice;

    // Use this for initialization
    void Start () {
        downloadManager = GameObject.Find("DownlaodManager");
        TestModelGenerator generator = downloadManager.GetComponent<TestModelGenerator>();
        GoogleARCore.ARCoreSessionConfig newconfig = new GoogleARCore.ARCoreSessionConfig();
        GoogleARCore.ARCoreSessionConfig config = ScriptableObject.CreateInstance<GoogleARCore.ARCoreSessionConfig>();
        config.AugmentedImageDatabase = generator.newBD;
        Debug.Log("transfered db size --------------- " + config.AugmentedImageDatabase.Count);
        arcoreDevice.GetComponent<GoogleARCore.ARCoreSession>().SessionConfig = config;
        Instantiate(arcoreDevice, new Vector3(0,0,0), Quaternion.identity);
    }
}
When I run in the editor, I don't get errors until I view the database in the editor; that's when I get this error:
ERROR: flag '--input_image_path' is missing its argument; flag description: Path of image to be evaluated. Currently only supports *.png, *.jpg and *.jpeg.
When I debug and look at the memory of the AugmentedImageDatabase, everything seems to be there and working fine. Also, once I build for Android I get no errors whatsoever, and when I use 'adb logcat -s Unity' on the command line, no exceptions are thrown.
Could this be a limitation of the new ARCore feature? Do AugmentedImageDatabases not allow dynamic creation on Android? If so, then why are there built-in functions for creating them?
I understand the features are brand new and there is not much documentation anywhere, so any help would be greatly appreciated.
I posted an issue on ARCore's GitHub page and got a response that the feature you're talking about isn't yet exposed in the Unity API:
https://github.com/google-ar/arcore-unity-sdk/issues/256

SNMP: Recover whole MIB tree with Python using bulk command

I have an SNMP agent running on Android.
I'm trying to recover the whole MIB tree with this Python script:
from pysnmp.entity.rfc3413.oneliner import cmdgen

cmdGen = cmdgen.CommandGenerator()

errorIndication, errorStatus, errorIndex, \
    varBinds = cmdGen.bulkCmd(
        cmdgen.CommunityData('public', mpModel=0),
        cmdgen.UdpTransportTarget(('192.168.0.90', 32150)),
        0,
        25,
        (1,3,6,1,4,1,12619,1,1)
    )

if errorIndication:
    print(errorIndication)
elif errorStatus:
    print('%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
    for varBind in varBinds:
        print(' = '.join([repr(x) for x in varBind]))
If I use the OID (1,3,6,1,4,1,12619,1,1) as the root OID, the output is this:
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.1.0')),OctetString('intel ICI101'))
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.2.0')),OctetString('4.4.4'))
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.3.0')), TimeTicks(10100333))
(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.2.1.0')), EndOfMibView())
It is working correctly. The problem is that I want the whole MIB tree, so I want to use the root OID, which is (1,3,6,1,4,1,12619,1). But the output using that OID is:
OIDs are not increasing
How can I make this work?
ignoreNonIncreasingOid = True

Adding that parameter as an option made it work: the agent returns OIDs that are not strictly increasing, and this option tells pysnmp to tolerate that instead of aborting with "OIDs are not increasing". (A complete version of the corrected script is shown after the snippet below.)
varBinds = cmdGen.bulkCmd(
    cmdgen.CommunityData('public', mpModel=0),
    cmdgen.UdpTransportTarget(('192.168.0.90', 32150)),
    0,
    25,
    (1,3,6,1,4,1,12619,1),
    ignoreNonIncreasingOid=True
)
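For completeness, here is a sketch of the whole corrected walk, combining the question's script with the fix (assuming the same agent address, port and community string as above):

from pysnmp.entity.rfc3413.oneliner import cmdgen

cmdGen = cmdgen.CommandGenerator()

# Walk the subtree under the enterprise OID, tolerating the agent's
# non-increasing OIDs.
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.bulkCmd(
    cmdgen.CommunityData('public', mpModel=0),
    cmdgen.UdpTransportTarget(('192.168.0.90', 32150)),
    0,
    25,
    (1, 3, 6, 1, 4, 1, 12619, 1),
    ignoreNonIncreasingOid=True
)

if errorIndication:
    print(errorIndication)
elif errorStatus:
    print('%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
    for varBind in varBinds:
        print(' = '.join([repr(x) for x in varBind]))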

Loading Tensorflow graph from binary file in Android application

I'm trying to integrate TensorFlow into an Android application. Since I'm new to TensorFlow, I'm starting with very simple operations.
As a first step I created just the following model:
import tensorflow as tf

with tf.Graph().as_default() as g:
    x = tf.placeholder("float", [1, 10], name="input")
    a = tf.zeros([10, 5], dtype=tf.float32, name="a")
    y = tf.matmul(x, a, name="output")

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    graph_def = g.as_graph_def()
    tf.train.write_graph(graph_def, 'models/', 'graph.pb', as_text=False)
This works fine. I'm able to properly get output from my C++ code (and then from Android as well).
I then tried to generate a using tf.random_normal. It seems this is not feasible by just replacing tf.zeros with tf.random_normal: tf.zeros returns a constant, while tf.random_normal does not. In particular, it seems I must handle it as a variable.
The idea I followed is the same as proposed in other examples I've found on GitHub: evaluate a before writing the graph, as in the code below:
import tensorflow as tf

a = tf.Variable(tf.random_normal([10, 5], dtype=tf.float32), name="a")
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
a_eval = a.eval(session=sess)

# print here properly produces matrix in output
print a_eval

sess.close()

with tf.Graph().as_default() as g:
    x = tf.placeholder(tf.float32, [1, 10], name="input")
    a_2 = tf.constant(a_eval, name="a_2")
    y = tf.matmul(x, a_2, name="output")

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    graph_def = g.as_graph_def()
    tf.train.write_graph(graph_def, 'models/', 'graph.pb', as_text=False)
Unfortunately it doesn't work, due to an error occurring while reading the binary file:
Out of range: Read less bytes than requested
This is the C++ code I'm currently using to load the graph from file:
tensorflow::GraphDef graph_def;
Status load_graph_status = ReadBinaryProto(Env::Default(), filepath, &graph_def);
if (!load_graph_status.ok()) {
    LOG(ERROR) << "could not create tensorflow graph: " << load_graph_status;
    return NULL;
}
I hope someone can help me with this problem.

Cast image (photo) to Chromecast

I'm following these (1, 2) guides to create a sender Android application for Chromecast, and I'm only interested in sending pictures.
There is a lot of information and there are samples showing how to cast text, audio and video, but not a single word on how to do that with pictures.
I believe in the power of Stack Overflow, and someone must have faced this problem before. Please give me a good sample or tutorial. All I need is a guide to casting a fullscreen picture using Media Router and its features.
This is how I was sending a text message using a custom channel:
/**
 * Send a text message to the receiver
 */
private void sendMessage(String message) {
    if (mApiClient != null && mSmartBusChannel != null) {
        try {
            Cast.CastApi.sendMessage(mApiClient,
                    mSmartBusChannel.getNamespace(), message)
                    .setResultCallback(new ResultCallback<Status>() {
                        @Override
                        public void onResult(Status result) {
                            if (!result.isSuccess()) {
                                Log.e(TAG, "Sending message failed");
                            }
                        }
                    });
        } catch (Exception e) {
            Log.e(TAG, "Exception while sending message", e);
        }
    } else {
        Toast.makeText(this, message, Toast.LENGTH_SHORT)
                .show();
    }
}
Video is sent using RemotePlaybackClient. Okay, but what about pictures?
Many thanks for any help.
EDIT:
I have found a method (on this blog) for sending pictures from local storage. And yeah, it doesn't really seem to be working.
public final void openPhotoOnChromecast(String title, String url, String ownerName, String description) {
    try {
        Log.d(TAG, "openPhotoOnChromecast: " + url);
        JSONObject payload = new JSONObject();
        payload.put(KEY_COMMAND, "viewphoto");
        payload.put("fullsizeUrl", url);
        payload.put("ownerName", ownerName);
        payload.put("title", title);
        payload.put("description", description);
        sendMessage(payload);
    } catch (JSONException e) {
        Log.e(TAG, "Cannot parse or serialize data for openPhotoOnChromecast", e);
    } catch (IOException e) {
        Log.e(TAG, "Unable to send openPhotoOnChromecast message", e);
    } catch (IllegalStateException e) {
        Log.e(TAG, "Message Stream is not attached", e);
    }
}
P.S. This method uses sendMessage(...) from these libraries (via Gradle):
compile files('libs/commons-io-2.4.jar')
compile files('libs/GoogleCastSdkAndroid.jar')
Looking here: Examples using CastCompanionLibrary to simply display an image

There are really three options for sending images to a Chromecast:

1. You can encode the image in a base64 string and send it over a data channel to the receiver. If it is too big, you can split it up and send it across in multiple messages. This is a really poor use of the cast technology and you really shouldn't do it, but it is possible.
2. You can simply send a URL to the Chromecast device and grab the image from your server inside the receiver app. This is the recommended way to send photos across to the Chromecast.
3. If you aren't downloading your images from a server, you can set up your own server running inside your client Android app and send a URL to the receiver to grab the image from there. This is rather complicated for sending images across, but is a far more robust option than option 1.
The goal of Chromecast, according to Google, is to stream content from the cloud, which is why there isn't really any native support for sending local images. Developers are instead encouraged to load images in the receiver application from a server.
Here is a pretty well documented example of how to make a slideshow / serve images from a local folder in Linux / Ubuntu:
https://github.com/sbow/pyCast
The directory / file types are specified at runtime - or default values can be used.
The code makes use of the pychromecast module and starts a simple webserver to make the images available to the Chromecast.
Code Examples
Create a local webserver
# Start webserver for current directory
def startServer(args, PORT=8000):
    os.chdir(args.directory)
    handler = http.server.SimpleHTTPRequestHandler
    with socketserver.TCPServer(("", PORT), handler) as httpd:
        print("Server started at localhost:" + str(PORT))
        httpd.serve_forever()

# Start new thread for webserver
daemon = threading.Thread(name='daemon_server',
                          target=startServer,
                          args=(args, PORT))
daemon.setDaemon(True)  # Set as a daemon so it will be killed once the main thread is dead.
daemon.start()
Build URLs for Local Images
# Build uri of first image for slideshow. This is sent to the chromecast. This
# ends up being a mash up of the host ip address, the webserver port, and the
# file name of the image to be displayed.
fileName = os.path.basename(filesAndPath[nStartFile])
fileUrl = urllib.parse.quote(fileName)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ipAddr = s.getsockname()[0]
fileUri = 'http://'+ipAddr+':'+'8000/'+fileUrl
Setup Chromecast
# -- Setup chromecast --
# List chromecasts on the network, but don't connect
services, browser = pychromecast.discovery.discover_chromecasts()
# Shut down discovery
pychromecast.discovery.stop_discovery(browser)
chromecasts, browser = pychromecast.get_listed_chromecasts(
    friendly_names=[args.cast]
)
if not chromecasts:
    print(f'No chromecast with name "{args.cast}" discovered')
    sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
print(f'Found chromecast with name "{args.cast}", attempting to play "{args.url}"')
cast.media_controller.play_media(fileUri, MEDIA_TAG)
# Wait for player_state PLAYING
player_state = None
has_played = False
# -- end Setup chromecast --
Infinite loop serving images from folder
# Enter the infinite loop where successive images are displayed via the
# chromecast, by sending it image uri's served by our scripts webserver,
# linking the chromecast to images in our directory.
iPhoto = nStartFile
iPhotoMax = nFiles-1
while True:
    try:
        if player_state != cast.media_controller.status.player_state:
            player_state = cast.media_controller.status.player_state
            print("Player state:", player_state)
        if player_state == "PLAYING":
            has_played = True
        if cast.socket_client.is_connected and has_played and player_state != "PLAYING":
            has_played = False
            cast.media_controller.play_media(args.url, "audio/mp3")

        time.sleep(args.pause)

        if args.do_random:
            nRandom = random.random()*nFiles
            iPhoto = round(nRandom)
        else:
            iPhoto = iPhoto + 1
            if iPhoto > iPhotoMax:
                iPhoto = 0

        fileName = os.path.basename(filesAndPath[iPhoto])
        fileUrl = urllib.parse.quote(fileName)
        fileUri = 'http://'+ipAddr+':'+'8000/'+fileUrl
        cast.media_controller.play_media(fileUri, MEDIA_TAG)
    except KeyboardInterrupt:
        break
Full program pyCast.py
"""
Play a slideshow on the chromecast
This program allows the user to cast images to their chromecast.
The images are of a particular type ie: ".JPEG" or ".jpg" or ".png",
and contained in a single folder. These parameters are provided,
among others, at command line invocation - or through tuning of
the default parameters below.
Arguments
__________
--show-debug : (none)
Show debugging information. False if not provided.
--do-random : (none)
Select image order at random. Ls order if not provided.
--media-flag : '*.jpeg'
Indicate via a command line regex file type to show
--media-tag : 'image/jpeg'
Indicate http object type
--cast : 'MyKitchenChromecast'
Provide friendly name of chromecast
--directory : '/home/barack/SecretPix'
Provide absolute path to directory for slideshow
--pause : 69
Number of seconds to hold each image in slideshow
Returns
_______
does not return. Ctrl-C to exit, or launch with "&" and kill process
Examples
______
python pyCast.py --show-debug --media-flag '*.JPEG' --media-tag 'image/jpeg'
--cast 'MyChromecast' --directory '/home/dorthy/OzGirlSummerPics' --do-random
"""
# pylint: disable=invalid-name
import argparse
import logging
import sys
import time
import pychromecast
import pprint
import glob
import os
import urllib.parse
import socket
import http.server
import socketserver
import threading
import random
# Authorship information
__author__ = "Shaun Bowman"
__copyright__ = "Copywrong 2022, Mazeltough Project"
__credits__ = ["SoManyCopyPastes... sorry i dont know the names", "Mom"]
__license__ = "MIT"
__version__ = "0.420.69"
__maintainer__ = "Shaun Bowman"
__email__ = "dm@me.com"
__status__ = "AlphaAF"
# Change to the friendly name of your Chromecast
CAST_NAME = 'ShaunsOfficeMonitor'
# Set webserver port
PORT = 8000
# Set time for photo
PAUSE = 120
# Set media type
MEDIA_FLAG = "*.JPEG"
MEDIA_TAG = "image/jpeg"
# Change to an audio or video url
MEDIA_URL ="http://192.168.0.222:8000/Screenshot%20from%202021-01-24%2023-11-40.png"
MEDIA_DIR = "./"
parser = argparse.ArgumentParser(
    description="Play a slideshow on Chromecast using all images of a given "+
                "type in a given directory."
)
parser.add_argument("--show-debug", help="Enable debug log", action="store_true")
parser.add_argument("--do-random", help="Pick media in dir at random, default false",
                    action="store_false")
parser.add_argument(
    "--media-flag", help="Media flag like *.JPEG or *.png", default=MEDIA_FLAG
)
parser.add_argument(
    "--media-tag", help="Media tag like 'image/jpeg' or 'image/png'",
    default=MEDIA_TAG
)
parser.add_argument(
    "--pause", help="Number of seconds per photograph during slideshow",
    default=PAUSE
)
parser.add_argument(
    "--cast", help='Name of cast device (default: "%(default)s")', default=CAST_NAME
)
parser.add_argument(
    "--url", help='Media url (default: "%(default)s")', default=MEDIA_URL
)
parser.add_argument(
    "--directory", help='Directory containing media to cast', default=MEDIA_DIR
)
args = parser.parse_args()
if args.show_debug:
    logging.basicConfig(level=logging.DEBUG)
# Start webserver for current directory
def startServer(args, PORT=8000):
    os.chdir(args.directory)
    handler = http.server.SimpleHTTPRequestHandler
    with socketserver.TCPServer(("", PORT), handler) as httpd:
        print("Server started at localhost:" + str(PORT))
        httpd.serve_forever()

# Start new thread for webserver
daemon = threading.Thread(name='daemon_server',
                          target=startServer,
                          args=(args, PORT))
daemon.setDaemon(True)  # Set as a daemon so it will be killed once the main thread is dead.
daemon.start()
# Wait for stuff... maybe useless
time.sleep(2)
# Get list of files of specific type, in specific directory
pprint.pprint(glob.glob(args.directory+"/"+MEDIA_FLAG))
filesAndPath = glob.glob(args.directory+"/"+MEDIA_FLAG)
nFiles = len(filesAndPath)
if (nFiles==0):
    pprint.pprint("Error: No files found")
    sys.exit(1)
# Select starting point for slideshow
random.seed()
nRandom = random.random()*nFiles
nStartFile = round(nRandom)
# Build uri of first image for slideshow. This is sent to the chromecast. This
# ends up being a mash up of the host ip address, the webserver port, and the
# file name of the image to be displayed.
fileName = os.path.basename(filesAndPath[nStartFile])
fileUrl = urllib.parse.quote(fileName)
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
s.connect(("8.8.8.8", 80))
ipAddr = s.getsockname()[0]
fileUri = 'http://'+ipAddr+':'+'8000/'+fileUrl
# -- Setup chromecast --
# List chromecasts on the network, but don't connect
services, browser = pychromecast.discovery.discover_chromecasts()
# Shut down discovery
pychromecast.discovery.stop_discovery(browser)
chromecasts, browser = pychromecast.get_listed_chromecasts(
    friendly_names=[args.cast]
)
if not chromecasts:
    print(f'No chromecast with name "{args.cast}" discovered')
    sys.exit(1)
cast = chromecasts[0]
# Start socket client's worker thread and wait for initial status update
cast.wait()
print(f'Found chromecast with name "{args.cast}", attempting to play "{args.url}"')
cast.media_controller.play_media(fileUri, MEDIA_TAG)
# Wait for player_state PLAYING
player_state = None
has_played = False
# -- end Setup chromecast --
# Enter the infinite loop where successive images are displayed via the
# chromecast, by sending it image uri's served by our scripts webserver,
# linking the chromecast to images in our directory.
iPhoto = nStartFile
iPhotoMax = nFiles-1
while True:
    try:
        if player_state != cast.media_controller.status.player_state:
            player_state = cast.media_controller.status.player_state
            print("Player state:", player_state)
        if player_state == "PLAYING":
            has_played = True
        if cast.socket_client.is_connected and has_played and player_state != "PLAYING":
            has_played = False
            cast.media_controller.play_media(args.url, "audio/mp3")

        time.sleep(args.pause)

        if args.do_random:
            nRandom = random.random()*nFiles
            iPhoto = round(nRandom)
        else:
            iPhoto = iPhoto + 1
            if iPhoto > iPhotoMax:
                iPhoto = 0

        fileName = os.path.basename(filesAndPath[iPhoto])
        fileUrl = urllib.parse.quote(fileName)
        fileUri = 'http://'+ipAddr+':'+'8000/'+fileUrl
        cast.media_controller.play_media(fileUri, MEDIA_TAG)
    except KeyboardInterrupt:
        break
# Shut down discovery
browser.stop_discovery()
