I have an SNMP agent on Android.
I'm trying to recover the whole MIBTree with this python script:
from pysnmp.entity.rfc3413.oneliner import cmdgen

cmdGen = cmdgen.CommandGenerator()

errorIndication, errorStatus, errorIndex, varBinds = cmdGen.bulkCmd(
    cmdgen.CommunityData('public', mpModel=0),
    cmdgen.UdpTransportTarget(('192.168.0.90', 32150)),
    0,   # nonRepeaters
    25,  # maxRepetitions
    (1, 3, 6, 1, 4, 1, 12619, 1, 1)
)

if errorIndication:
    print(errorIndication)
elif errorStatus:
    print('%s at %s' % (errorStatus.prettyPrint(),
                        errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
else:
    for varBind in varBinds:
        print(' = '.join([repr(x) for x in varBind]))
If I use (1,3,6,1,4,1,12619,1,1) as the root OID, the output is this:
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.1.0')), OctetString('intel ICI101'))
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.2.0')), OctetString('4.4.4'))
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.1.3.0')), TimeTicks(10100333))
ObjectType(ObjectIdentity(ObjectName('1.3.6.1.4.1.12619.1.2.1.0')), EndOfMibView())
That works correctly. The problem is that I want the whole MIB tree, so I want to use the root OID, which is (1,3,6,1,4,1,12619,1). But the output using that OID is:
OIDs are not increasing
How can I make this work?
ignoreNonIncreasingOid=True
Adding that parameter as an option made it work:
errorIndication, errorStatus, errorIndex, varBinds = cmdGen.bulkCmd(
    cmdgen.CommunityData('public', mpModel=0),
    cmdgen.UdpTransportTarget(('192.168.0.90', 32150)),
    0,
    25,
    (1, 3, 6, 1, 4, 1, 12619, 1),
    ignoreNonIncreasingOid=True
)
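For context: pysnmp raises "OIDs are not increasing" when the agent returns an OID that is not lexicographically greater than the previous one; the check exists to stop endless walks over buggy agents, and ignoreNonIncreasingOid=True simply disables it. The same walk can also be written against the newer pysnmp.hlapi generator API; a sketch, assuming pysnmp 4.3+ (host, port and community are taken from the question):

from pysnmp.hlapi import (SnmpEngine, CommunityData, UdpTransportTarget,
                          ContextData, ObjectType, ObjectIdentity, bulkCmd)

iterator = bulkCmd(
    SnmpEngine(),
    CommunityData('public', mpModel=0),
    UdpTransportTarget(('192.168.0.90', 32150)),
    ContextData(),
    0, 25,                           # nonRepeaters, maxRepetitions
    ObjectType(ObjectIdentity('1.3.6.1.4.1.12619.1')),
    lexicographicMode=False,         # stop once we leave the subtree
    ignoreNonIncreasingOid=True      # tolerate agents that misorder OIDs
)

for errorIndication, errorStatus, errorIndex, varBinds in iterator:
    if errorIndication:
        print(errorIndication)
        break
    elif errorStatus:
        print('%s at %s' % (errorStatus.prettyPrint(),
                            errorIndex and varBinds[int(errorIndex) - 1][0] or '?'))
        break
    else:
        for varBind in varBinds:
            print(' = '.join([x.prettyPrint() for x in varBind]))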
I am trying to run an app made in Kivy along with a TensorFlow session, and keep the session from being loaded every time I make a prediction. To be more precise, I want to know how I can call the function from inside the session.
Here is the code for the session:
def decode():
    # Only allocate part of the GPU memory when predicting.
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)
    with tf.Session(config=config) as sess:
        # Create model and load parameters.
        model = create_model(sess, True)
        model.batch_size = 1
        enc_vocab_path = os.path.join(gConfig['working_directory'], "vocab%d.enc" % gConfig['enc_vocab_size'])
        dec_vocab_path = os.path.join(gConfig['working_directory'], "vocab%d.dec" % gConfig['dec_vocab_size'])
        enc_vocab, _ = data_utils.initialize_vocabulary(enc_vocab_path)
        _, rev_dec_vocab = data_utils.initialize_vocabulary(dec_vocab_path)

        # !!! This is the function that I'm trying to call. !!!
        def answersqs(sentence):
            token_ids = data_utils.sentence_to_token_ids(tf.compat.as_bytes(sentence), enc_vocab)
            bucket_id = min([b for b in xrange(len(_buckets))
                             if _buckets[b][0] > len(token_ids)])
            encoder_inputs, decoder_inputs, target_weights = model.get_batch(
                {bucket_id: [(token_ids, [])]}, bucket_id)
            _, _, output_logits = model.step(sess, encoder_inputs, decoder_inputs,
                                             target_weights, bucket_id, True)
            outputs = [int(np.argmax(logit, axis=1)) for logit in output_logits]
            if data_utils.EOS_ID in outputs:
                outputs = outputs[:outputs.index(data_utils.EOS_ID)]
            return " ".join([tf.compat.as_str(rev_dec_vocab[output]) for output in outputs])
Here is where I'm calling the function:
def resp(self, msg):
    def p():
        if len(msg) > 0:
            # If I try to do decode().answersqs(msg), it starts a new session.
            ansr = answersqs(msg)
            ansrbox = Message()
            ansrbox.ids.mlab.text = str(ansr)
            ansrbox.ids.mlab.color = (1, 1, 1)
            ansrbox.pos_hint = {'x': 0}
            ansrbox.source = './icons/ansr_box.png'
            self.root.ids.chatbox.add_widget(ansrbox)
            self.root.ids.scrlv.scroll_to(ansrbox)
    threading.Thread(target=p).start()
And here is the last part:
if __name__ == "__main__":
    if len(sys.argv) - 1:
        gConfig = brain.get_config(sys.argv[1])
    else:
        # get configuration from seq2seq.ini
        gConfig = brain.get_config()
    # Note: target=decode() would call decode() right here, on the main
    # thread; pass the function itself instead.
    threading.Thread(target=decode).start()
    KatApp().run()
Also, should I change the session from GPU to CPU before I port it to Android?
You should have two variables graph and session that you keep around.
When you load the model you do something like:
graph = tf.Graph()
session = tf.Session(config=config)
with graph.as_default(), session.as_default():
    # The rest of your model loading code.
When you need to make a prediction:
with graph.as_default(), session.as_default():
    return session.run([your_result_tensor])
What happens is that the session is loaded and kept in memory, and you just tell the system that that's the context in which you want to run.
In your code, move def answersqs outside of the with part. It should bind automatically to graph and session from the surrounding function (but you need to make them available outside the with).
For the second part: normally, if you follow the guides, the exported model should be free of hardware-binding information, and when you load it TensorFlow will figure out a good placement (which might be the GPU, if one is available and sufficiently capable).
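Applied to the code from the question, a minimal sketch of that restructuring (names like create_model and answersqs and the GPU options come from the question; everything else is an assumption):

import tensorflow as tf

graph = None
session = None

def load_model():
    # Build the graph and session once, at startup, and keep them around.
    global graph, session
    graph = tf.Graph()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.2)
    config = tf.ConfigProto(gpu_options=gpu_options)
    session = tf.Session(graph=graph, config=config)
    with graph.as_default(), session.as_default():
        # model = create_model(session, True), vocabulary loading, etc.
        pass

def answersqs(sentence):
    # Re-enter the already-loaded graph/session instead of creating new ones.
    with graph.as_default(), session.as_default():
        # token lookup and model.step(session, ...) from the question go here
        pass

The Kivy thread then just calls answersqs(msg); no new session is created per prediction.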
This question already has answers here: Read binary stdout data like screencap data from adb shell? (19 answers)
Closed 5 years ago.
When I use adb exec-out screencap -p via Python's commands.getstatusoutput or subprocess.call on my MacBook to take a screenshot, as below, I get perfect PNG file bytes. But when running on Windows, I just get cannot identify image file <_io.BytesIO object at 0x000002ADDDB49BF8> from Image.open().
def cmd(line, out_is_binary=False):
    cmdline = line if isinstance(line, str) else ' '.join(line)
    with tempfile.TemporaryFile() as stdout:
        status = subprocess.call(line, stdout=stdout, stderr=stdout)
        stdout.seek(0)
        output = stdout.read()
    output = str(output, 'utf-8') if not out_is_binary else output
    output_log = output if not out_is_binary else '<binary data>'
    print('"%s" returned %s, and says:%s%s' % (cmdline, status, os.linesep, output_log))
    return status, output
def capture():
    line = [ADB_BIN, 'exec-out', 'screencap', '-p']
    status, output = cmd(line, out_is_binary=True)
    if status:
        raise RuntimeError('Screenshot via USB debugging failed')
    fp = BytesIO(output)
    return Image.open(fp)
PS: This question should not be closed as a duplicate of the adb question, because the point here is specifically how to get a screenshot from the Windows CMD, or from Python, on Windows.
I finally got the solution below: when running on Windows, use base64 to transfer the data and then decode it in Python (presumably the Windows adb pipeline mangles raw binary output, e.g. translating \n to \r\n, while base64 text survives intact):
def capture():
    line = [ADB_BIN, 'exec-out', 'screencap', '-p']
    if os.name == 'nt':
        line = [ADB_BIN, 'shell', 'screencap -p | base64']
    status, output = cmd(line, out_is_binary=True)
    if status:
        raise RuntimeError('Screenshot via USB debugging failed')
    if os.name == 'nt':
        output = base64.decodebytes(output)
    fp = BytesIO(output)
    return Image.open(fp)
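A possible alternative (my assumption, not part of the original answer) is to keep exec-out and undo the newline translation instead of round-tripping through base64; this only helps if \n-to-\r\n mangling is the sole corruption:

import os

def fix_binary_output(output):
    # Hypothetical fix: undo Windows CRLF translation in binary stdout.
    if os.name == 'nt':
        return output.replace(b'\r\n', b'\n')
    return output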
python.py
from pymongo import MongoClient
from flask import Flask

app = Flask(__name__)
host = "10.0.0.10"
port = 8085

@app.route('/name/<string:name>', methods=['GET', 'POST'])
def GetNoteText(name):
    print name
    return "Data Received"

@app.route('/', methods=['POST'])
def abc():
    print "Hii"
    return 'Welcome'

users = []

@app.route('/getNames')
def getName():
    client = MongoClient('mongodb://localhost:27017/')
    db = client.bridgeUserInformationTable
    cursor = db.bridgeUsersInfo.find()
    for document in cursor:
        # print "Name : ", document['name']
        users.append(document['name'])
        print document['name']
    # print(users)
    return "<html><body><h1>" + str(users) + "</h1></body></html>"

if __name__ == '__main__':
    app.run(host=host, port=port)
node.js
var PythonShell = require('python-shell');
PythonShell.run('pass.py', function (err) {
if (err) throw err;
console.log('finished');
});
As I've tried above, can we call a Python script from Node.js after the Node.js script gets input from the Android device? I'm a little bit confused about how this should be solved, and about how the two languages should communicate with each other (Python to Node.js).
Yes, we can call a Python file from Node.js; you may use the node-cmd package for this purpose.
const cmd = require('node-cmd');

const processRef = cmd.run('python -i');
let data_line = '';

// listen to the Python terminal output
processRef.stdout.on(
    'data',
    function (data) {
        data_line += data;
        if (data_line[data_line.length - 1] == '\n') {
            console.log(data_line);
        }
    }
);

const pythonTerminalInput = `primes = [2, 3, 5, 7]
for prime in primes:
    print(prime)
`;

// show what we are doing
console.log(`>>>${pythonTerminalInput}`);

// send it to the open python terminal
processRef.stdin.write(pythonTerminalInput);
zerorpc is a really nifty library built on top of ZeroMQ. This is probably the easiest way to call Python code from Node.
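As an illustration, a minimal zerorpc server sketch on the Python side (the method name, return value and endpoint are assumptions; Node would connect with the zerorpc npm package and call the methods remotely):

import zerorpc

class Api(object):
    def get_names(self):
        # Query Mongo here, like getName() in python.py above.
        return ['name1', 'name2']

server = zerorpc.Server(Api())
server.bind('tcp://127.0.0.1:4242')  # assumed endpoint for the Node client
server.run()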
For a really simple, non-robust approach, you could use a tmp file to write the Python commands from Node. With an event loop running inside Python, watch the tmp file for changes and execute the commands therein; see the sketch below.
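A rough sketch of the Python side of that tmp-file loop (the file name and polling interval are assumptions; deliberately non-robust, with no locking or error handling):

import os
import time

CMD_FILE = 'commands.txt'  # hypothetical file the Node process appends to
offset = 0

while True:
    if os.path.exists(CMD_FILE) and os.path.getsize(CMD_FILE) > offset:
        with open(CMD_FILE) as f:
            f.seek(offset)
            new_commands = f.read()
            offset = f.tell()
        exec(new_commands)  # run whatever Node wrote
    time.sleep(0.5)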
I'm trying to integrate TensorFlow into an Android application. Since I'm new to TensorFlow, I'm starting with very simple operations.
As a first step I created just the following model:
import tensorflow as tf

with tf.Graph().as_default() as g:
    x = tf.placeholder("float", [1, 10], name="input")
    a = tf.zeros([10, 5], dtype=tf.float32, name="a")
    y = tf.matmul(x, a, name="output")

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    graph_def = g.as_graph_def()
    tf.train.write_graph(graph_def, 'models/', 'graph.pb', as_text=False)
This works fine. I'm able to properly get output from my C++ code (and then from Android as well).
I then tried to generate a by using tf.random_normal. It seems this is not feasible by just replacing tf.zeros with tf.random_normal, because tf.zeros returns a constant while tf.random_normal does not. In particular, it seems I must handle it as a variable.
The idea I followed is the same one proposed in other examples I've found on GitHub: evaluating a before writing the graph, as in the code below:
import tensorflow as tf

a = tf.Variable(tf.random_normal([10, 5], dtype=tf.float32), name="a")
init = tf.initialize_all_variables()
sess = tf.Session()
sess.run(init)
a_eval = a.eval(sess)

# print here properly produces the matrix in output
print a_eval
sess.close()

with tf.Graph().as_default() as g:
    x = tf.placeholder(tf.float32, [1, 10], name="input")
    a_2 = tf.constant(a_eval, name="a_2")
    y = tf.matmul(x, a_2, name="output")

    init = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init)

    graph_def = g.as_graph_def()
    tf.train.write_graph(graph_def, 'models/', 'graph.pb', as_text=False)
Unfortunately, it seems it doesn't work, due to an error occurring while reading the binary file:
Out of range: Read less bytes than requested
This is the C++ code I'm currently using for loading the graph from a file:
tensorflow::GraphDef graph_def;
Status load_graph_status = ReadBinaryProto(Env::Default(), filepath, &graph_def);
if (!load_graph_status.ok()) {
    LOG(ERROR) << "could not create tensorflow graph: " << load_graph_status;
    return NULL;
}
I hope someone can help me with this problem.
I've implemented a service that listens to commands issued through ADB. An example of a command sent through ADB could look like this:
adb shell am startservice -a com.testandroid.SEND_SMS -e number 123123123 -e message "åäö"
Now, the problem here is that the encoding of the string "åäö" seems to get messed up. If I take that string extra and immediately output it to the log, I get a square ("unknown character"). If I send the message, I get Chinese characters in the Messages app. As long as I stick to non-umlaut characters (ASCII, I guess), everything works fine.
I'm using Windows 7 and the command line for this. I have not touched the encoding of the command line, and I've tried to process the extras string by getting its bytes with UTF-8 as the encoding argument, then creating a new String, passing UTF-8 as the encoding argument there as well. No dice, though.
The values of the bytes, when using getBytes(), are å: -27, ä: -92, ö: -74.
How do I get this to play nice so I can make use of at least the umlauts?
All of this works perfectly fine in Linux.
I ran into the same issue, but finally I got it to work!
If you use, for example, C#, you have to do it like in the following example.
Update (02.12.2019):
According to protocol.txt, the ADB protocol supports "smart sockets". These sockets can be used to do everything the ADB client inside adb.exe does. For example, if you want to upload a file, you have to request such a "smart socket". After that, you have to follow the protocol assigned to the service (for a service overview see SERVICE.txt), as described, for example, in SYNC.txt.
Original answer (13.10.2014):
public static List<string> ExecuteBG(string exe, string args, int timeOut = -1)
{
    if (File.Exists(exe) || exe == "cmd.exe")
    {
        ProcessStartInfo StartInfo = new ProcessStartInfo();
        StartInfo.FileName = exe;
        StartInfo.Arguments = Encoding.Default.GetString(Encoding.UTF8.GetBytes(args));
        StartInfo.CreateNoWindow = true;
        StartInfo.UseShellExecute = false;
        StartInfo.RedirectStandardError = true;
        StartInfo.RedirectStandardOutput = true;
        StartInfo.StandardErrorEncoding = Encoding.UTF8;
        StartInfo.StandardOutputEncoding = Encoding.UTF8;

        AutoResetEvent errorWaitHandle = new AutoResetEvent(false);
        AutoResetEvent outputWaitHandle = new AutoResetEvent(false);
        List<string> response = new List<string>();

        Process proc = new Process();
        proc.StartInfo = StartInfo;
        proc.ErrorDataReceived += (s, e) =>
        {
            if (String.IsNullOrEmpty(e.Data))
            {
                errorWaitHandle.Set();
            }
            else
            {
                response.Add(e.Data);
            }
        };
        proc.OutputDataReceived += (s, e) =>
        {
            if (String.IsNullOrEmpty(e.Data))
            {
                outputWaitHandle.Set();
            }
            else
            {
                response.Add(e.Data);
            }
        };
        proc.Start();
        proc.BeginErrorReadLine();
        proc.BeginOutputReadLine();
        proc.WaitForExit(timeOut);
        errorWaitHandle.WaitOne(timeOut);
        outputWaitHandle.WaitOne(timeOut);

        return response;
    }

    return new List<string>();
}
The really important part is StartInfo.Arguments = Encoding.Default.GetString(Encoding.UTF8.GetBytes(args));. Here we convert the UTF-8 string into the Windows "default" charset, which is the one cmd knows. So we send a "destroyed" default-encoded string to cmd, and the Android shell converts it back to UTF-8. That way we get the umlauts like üöäÜÖÄàè etc.
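For comparison, a hypothetical Python analogue of the same trick (an untested assumption on my side: it relies on every UTF-8 byte being representable in the local ANSI code page, which does hold for åäö under cp1252):

import locale
import subprocess

def adb_shell_utf8(command):
    # Pre-mangle the UTF-8 bytes through the ANSI code page so that the
    # encode/decode round-trip on the way to adb restores them.
    ansi = locale.getpreferredencoding(False)  # e.g. 'cp1252'
    mangled = command.encode('utf-8').decode(ansi)
    subprocess.call('adb shell ' + mangled, shell=True)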
Hope this helps someone.
PS: If you need a working "framework" that supports UTF-8 push/pull for files/folders, also have a look at my AndroidCtrl.dll; it's written in C# (.NET 4).
Regards,
Sebastian
Concluding: the problem is situated in either cmd.exe or adb.exe. Until one or both are updated to be more compatible with each other, I will sadly not be able to make use of this for the time being.