Watson Unity Android: bad speech recognition

I'm using Unity and Watson to build a VR app that accepts voice commands. I'm using the ExampleStreaming.cs provided with the asset, but the recognition is terrible. I changed the language model to Portuguese in the code. I'm running on a Samsung Galaxy S7 in a Gear VR with a microphone plugged in, and I suspect the app is using the phone's built-in mic rather than the plugged-in one. Any help is appreciated.
Here's the code:
private int _recordingRoutine = 0;
private string _microphoneID = null;
private AudioClip _recording = null;
private int _recordingBufferSize = 1;
private int _recordingHZ = 22050;
private SpeechToText _service;
public GolemController GolemControllerObj;
void Start()
{
LogSystem.InstallDefaultReactors();
Runnable.Run(CreateService());
}
private IEnumerator CreateService()
{
// Create credential and instantiate service
Credentials credentials = null;
if (!string.IsNullOrEmpty(_username) && !string.IsNullOrEmpty(_password))
{
// Authenticate using username and password
credentials = new Credentials(_username, _password, _serviceUrl);
}
else if (!string.IsNullOrEmpty(_iamApikey))
{
// Authenticate using iamApikey
TokenOptions tokenOptions = new TokenOptions()
{
IamApiKey = _iamApikey,
IamUrl = _iamUrl
};
credentials = new Credentials(tokenOptions, _serviceUrl);
// Wait for tokendata
while (!credentials.HasIamTokenData())
yield return null;
}
else
{
throw new WatsonException("Please provide either username and password or IAM apikey to authenticate the service.");
}
_service = new SpeechToText(credentials);
_service.StreamMultipart = true;
_service.RecognizeModel="pt-BR_BroadbandModel";
Active = true;
StartRecording();
}
public bool Active
{
get { return _service.IsListening; }
set
{
if (value && !_service.IsListening)
{
_service.DetectSilence = true;
_service.EnableWordConfidence = true;
_service.EnableTimestamps = true;
_service.SilenceThreshold = 0.01f;
_service.MaxAlternatives = 0;
_service.EnableInterimResults = true;
_service.OnError = OnError;
_service.InactivityTimeout = -1;
_service.ProfanityFilter = false;
_service.SmartFormatting = true;
_service.SpeakerLabels = false;
_service.WordAlternativesThreshold = null;
_service.StartListening(OnRecognize, OnRecognizeSpeaker);
}
else if (!value && _service.IsListening)
{
_service.StopListening();
}
}
}
private void StartRecording()
{
if (_recordingRoutine == 0)
{
UnityObjectUtil.StartDestroyQueue();
_recordingRoutine = Runnable.Run(RecordingHandler());
}
}
private void StopRecording()
{
if (_recordingRoutine != 0)
{
Microphone.End(_microphoneID);
Runnable.Stop(_recordingRoutine);
_recordingRoutine = 0;
}
}
private void OnError(string error)
{
Active = false;
Log.Debug("ExampleStreaming.OnError()", "Error! {0}", error);
}
private IEnumerator RecordingHandler()
{
Log.Debug("ExampleStreaming.RecordingHandler()", "devices: {0}", Microphone.devices);
_recording = Microphone.Start(_microphoneID, true, _recordingBufferSize, _recordingHZ);
yield return null; // let _recordingRoutine get set..
if (_recording == null)
{
StopRecording();
yield break;
}
bool bFirstBlock = true;
int midPoint = _recording.samples / 2;
float[] samples = null;
while (_recordingRoutine != 0 && _recording != null)
{
int writePos = Microphone.GetPosition(_microphoneID);
if (writePos > _recording.samples || !Microphone.IsRecording(_microphoneID))
{
Log.Error("ExampleStreaming.RecordingHandler()", "Microphone disconnected.");
StopRecording();
yield break;
}
if ((bFirstBlock && writePos >= midPoint)
|| (!bFirstBlock && writePos < midPoint))
{
// front block is recorded, make a RecordClip and pass it onto our callback.
samples = new float[midPoint];
_recording.GetData(samples, bFirstBlock ? 0 : midPoint);
AudioData record = new AudioData();
record.MaxLevel = Mathf.Max(Mathf.Abs(Mathf.Min(samples)), Mathf.Max(samples));
record.Clip = AudioClip.Create("Recording", midPoint, _recording.channels, _recordingHZ, false);
record.Clip.SetData(samples, 0);
_service.OnListen(record);
bFirstBlock = !bFirstBlock;
}
else
{
// calculate the number of samples remaining until we ready for a block of audio,
// and wait that amount of time it will take to record.
int remaining = bFirstBlock ? (midPoint - writePos) : (_recording.samples - writePos);
float timeRemaining = (float)remaining / (float)_recordingHZ;
yield return new WaitForSeconds(timeRemaining);
}
}
yield break;
}
private void OnRecognize(SpeechRecognitionEvent result, Dictionary<string, object> customData)
{
if (result != null && result.results.Length > 0)
{
foreach (var res in result.results)
{
foreach (var alt in res.alternatives)
{
string text = string.Format("{0} ({1}, {2:0.00})\n", alt.transcript, res.final ? "Final" : "Interim", alt.confidence);
Log.Debug("ExampleStreaming.OnRecognize()", text);
ResultsField.text = text;
if(res.final == true)
{
GolemControllerObj.GolemActions(alt.transcript);
}
}
if (res.keywords_result != null && res.keywords_result.keyword != null)
{
foreach (var keyword in res.keywords_result.keyword)
{
Log.Debug("ExampleStreaming.OnRecognize()", "keyword: {0}, confidence: {1}, start time: {2}, end time: {3}", keyword.normalized_text, keyword.confidence, keyword.start_time, keyword.end_time);
}
}
if (res.word_alternatives != null)
{
foreach (var wordAlternative in res.word_alternatives)
{
Log.Debug("ExampleStreaming.OnRecognize()", "Word alternatives found. Start time: {0} | EndTime: {1}", wordAlternative.start_time, wordAlternative.end_time);
foreach(var alternative in wordAlternative.alternatives)
Log.Debug("ExampleStreaming.OnRecognize()", "\t word: {0} | confidence: {1}", alternative.word, alternative.confidence);
}
}
}
}
}
private void OnRecognizeSpeaker(SpeakerRecognitionEvent result, Dictionary<string, object> customData)
{
if (result != null)
{
foreach (SpeakerLabelsResult labelResult in result.speaker_labels)
{
Log.Debug("ExampleStreaming.OnRecognize()", string.Format("speaker result: {0} | confidence: {3} | from: {1} | to: {2}", labelResult.speaker, labelResult.from, labelResult.to, labelResult.confidence));
}
}
}
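A guess at the mic issue: the example leaves _microphoneID as null, and passing null to Microphone.Start() always selects Unity's default capture device, which on a phone is normally the built-in mic. Below is a minimal sketch of picking an explicit device before recording starts; the "built-in" name check is only a heuristic, since the exact name the headset mic reports is device-specific:
private string SelectMicrophone()
{
    // Unity lists every capture device it recognizes; null means "default device".
    foreach (string device in Microphone.devices)
    {
        Debug.Log("Capture device found: " + device);
        // Heuristic only: prefer anything that does not look like the built-in mic.
        if (!device.ToLower().Contains("built-in"))
            return device;
    }
    return null; // fall back to the default device
}
With something like this, _microphoneID = SelectMicrophone(); could be assigned in Start() (or at the top of CreateService()) before StartRecording() runs, so that Microphone.Start() and Microphone.GetPosition() both address the chosen device.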

Related

How to integrate Google Assistant in Android for a native application?

I have gone through many tutorials for API.AI but didn't find an exact solution. My requirement is simple: the user sends a command by voice or text, my application receives that command, and it executes some method.
API.AI
Actions on Google
Tutorial of Google Assistant
First of all, you need to train your model on API.AI to respond to the text given to it.
Some example code with API.AI, FYI:
//Initialize Service
private void initService(final LanguageConfig selectedLanguage) {
try {
final AIConfiguration.SupportedLanguages lang = AIConfiguration.SupportedLanguages.fromLanguageTag(selectedLanguage.getLanguageCode());
final AIConfiguration config = new AIConfiguration(selectedLanguage.getAccessToken(),
lang,
AIConfiguration.RecognitionEngine.System);
aiDataService = new AIDataService(this, config);
} catch (Exception e) {
e.printStackTrace();
}
}
//Send request method where you can put user typed text to get the result from API.AI
private void sendRequest(final String textToSend, final int flag) {
Log.w(TAG, "Sending" + textToSend);
final AsyncTask<String, Void, AIResponse> task = new AsyncTask<String, Void, AIResponse>() {
private AIError aiError;
@Override
protected void onPreExecute() {
super.onPreExecute();
showHideProgressBar(true);
if (mVoiceRecorder != null) {
mVoiceRecorder.pauseRecording();
}
}
@Override
protected AIResponse doInBackground(final String... params) {
final AIRequest request = new AIRequest();
String query = params[0];
String event = params[1];
if (!TextUtils.isEmpty(query))
request.setQuery(query);
if (!TextUtils.isEmpty(event)) {
request.setEvent(new AIEvent(event));
}
final String contextString = params[2];
RequestExtras requestExtras = null;
if (!TextUtils.isEmpty(contextString)) {
final List<AIContext> contexts = Collections.singletonList(new AIContext(contextString));
requestExtras = new RequestExtras(contexts, null);
}
try {
Log.i("API AI Request", "" + request.toString());
return aiDataService.request(request, requestExtras);
} catch (final AIServiceException e) {
aiError = new AIError(e);
return null;
}
}
@Override
protected void onPostExecute(final AIResponse response) {
showHideProgressBar(false);
speechSentStatus = false;
okSentStatus = false;
if (response != null) {
onResult(response, flag, textToSend);
} else {
onError(aiError);
}
}
};
if (flag == OPEN_COMPLAIN_CODE) {
task.execute("", Config.Events[0], Config.Events[0]);
} else if (flag == OPEN_DIAGNOSIS_CODE) {
task.execute("", Config.Events[1], Config.Events[1]);
} else if (flag == Constants.OPEN_MEDICATION_CODE) {
task.execute("", Config.Events[2], Config.Events[2]);
} else if (flag == Constants.OPEN_LABTEST_CODE) {
task.execute("", Config.Events[3], Config.Events[3]);
} else if (flag == Constants.COMPLAINTS_ADDED) {
task.execute("", Config.Events[0], Config.Events[0]);
} else if (flag == Constants.DIAGNOSIS_ADDED) {
task.execute("", Config.Events[1], Config.Events[1]);
} else {
task.execute(textToSend, null, "");
}
}
//Based on result you can handle the business logic
private void onResult(final AIResponse response, final int flag, final String textToSend) {
runOnUiThread(new Runnable() {
@Override
public void run() {
apiAiResponseCounter = apiAiResponseCounter + 1;
isLast = false;
final Result result = response.getResult();
Log.w(TAG, "" + result.getFulfillment().getSpeech());
if (flag == Constants.COMPLAINTS_ADDED) {
//method you want to execute on receiving certain text from model
send(textToSend.toLowerCase(), DONTTEXT);
} else if (flag == Constants.DIAGNOSIS_ADDED) {
send(textToSend.toLowerCase(), DONTTEXT);
} else {
String error = "";
final String speech = result.getFulfillment().getSpeech();
if (speech.contains("?")) {
if (!result.getAction().equalsIgnoreCase("input.unknown")) {
if (result.getAction().equalsIgnoreCase(Config.Actions[5]) && result.isActionIncomplete() == false) {
//DONOTHING
} else {
digiMessage(speech, YESNO);
}
} else {
digiMessage(speech, ChatMessageAdapter.OTHER_MESSAGE);
}
} else {
if (speech.equalsIgnoreCase("Please help me the intake duration of the medication")) {
digiMessage(speech, ChatMessageAdapter.DURATION);
} else if (speech.equalsIgnoreCase("Please provide the daily routine for the medication intake")) {
digiMessage(speech, ChatMessageAdapter.FREQUENCY);
} else {
digiMessage(speech, ChatMessageAdapter.OTHER_MESSAGE);
}
}
if (result.getAction().equalsIgnoreCase(Config.Actions[4]) || result.getAction().equalsIgnoreCase(Config.Actions[5])) {
if (result.isActionIncomplete() == true) {
playSpeech(speech);
} else {
speechBuffer = "";
speechBuffer = speech;
}
} else {
if (result.getAction().equalsIgnoreCase(Config.Actions[11])) {
isLast = true;
if (mVoiceRecorder != null) {
stopVoiceRecording();
}
} else {
playSpeech(speech);
}
}
}
}
});
if (flag == Constants.COMPLAINTS_ADDED || flag == Constants.DIAGNOSIS_ADDED) {
Log.w(TAG, "Skipped");
} else {
inflateUI(response.getResult());
}
}

Is there any way to run WorkManager's requests non-persistently?

I've switched to WorkManager and use it in almost all cases. The methods that return LiveData by request ID are very convenient for UI binding.
I have a task that logs in to a remote server. I would like to keep using the WorkManager API for this task, but I don't need full DB-level persistence. I only need to track my task within the app lifecycle (e.g. across orientation changes) and, of course, I don't want my credentials posted to the service's DB.
Is there any way to enqueue and keep track of my background tasks in the WorkManager manner, but without persistence, just for the running process?
OK, I implemented my own small WorkManager based on WorkManager's classes, but it's quite hard to maintain.
static class ThreadWorkManager extends TinyWorkManager {
static final String TAG = ThreadWorkManager.class.getSimpleName();
final ExecutorService executor = Executors.newFixedThreadPool(10);
final Executor uiExecutor = Utils::uiPost;
final HashMap<UUID, WorkData> workDatum = new HashMap<>();
final WeakHashMap<MutableLiveData<WorkInfo>, UUID> liveIdDatum = new WeakHashMap<>();
final WeakHashMap<MutableLiveData<WorkInfo>, String> liveTagDatum = new WeakHashMap<>();
static WorkInfo fromWorkData(UUID id, WorkData workData) {
return workData != null ? new WorkInfo(id, workData.spec.state, workData.spec.output, workData.tagList) : null;
}
static class WorkData {
final ListenableWorker worker;
final WorkSpec spec;
final Set<String> tags;
final List<String> tagList;
long time = System.currentTimeMillis();
Runnable startLater;
WorkData(ListenableWorker worker, WorkSpec spec) {
this.worker = worker;
this.spec = spec;
this.tags = worker.getTags();
this.tagList = new ArrayList<>(tags);
}
}
void onWorkChanged(UUID id) {
//if (BuildConfig.DEBUG && Thread.currentThread() != ui.getLooper().getThread())
// throw new AssertionError();
WorkData workData = workDatum.get(id);
if (workData == null) return;
workData.time = System.currentTimeMillis();
WorkInfo workInfo = fromWorkData(id, workData);
//TODO remove finished workdata and id livedata
while (true) {
try {
for (Map.Entry<MutableLiveData<WorkInfo>, UUID> entry : liveIdDatum.entrySet())
if (entry.getValue().equals(id))
entry.getKey().setValue(workInfo);
break;
} catch (ConcurrentModificationException e) {
Log.w(TAG, "concurrent 1");
}
}
while (true) {
try {
for (Map.Entry<MutableLiveData<WorkInfo>, String> entry : liveTagDatum.entrySet())
if (workData.tags.contains(entry.getValue()))
entry.getKey().setValue(workInfo);
break;
} catch (ConcurrentModificationException e) {
Log.w(TAG, "concurrent 2");
}
}
}
void scheduleWork(UUID id, WorkData workData, long delay) {
workData.spec.state = WorkInfo.State.ENQUEUED;
onWorkChanged(id);
uiPost(workData.startLater = () -> {
workData.startLater = null;
workData.spec.state = WorkInfo.State.RUNNING;
onWorkChanged(id);
ListenableFuture<ListenableWorker.Result> future = workData.worker.startWork();
future.addListener(new WorkerListener(id, future), uiExecutor);
}, delay);
}
@Override
public void enqueue(WorkRequest workRequest) {
//executor.submit(new WorkUnit(workRequest));
try {
Class<? extends ListenableWorker> workerClass = (Class<? extends ListenableWorker>) Class.forName(workRequest.getWorkSpec().workerClassName);
Class[] types = {Context.class, WorkerParameters.class};
Constructor constructor = workerClass.getConstructor(types);
UUID id = workRequest.getId();
WorkSpec workSpec = workRequest.getWorkSpec();
ListenableWorker worker = (ListenableWorker) constructor.newInstance(appContext, new WorkerParameters(
id,
workSpec.input,
workRequest.getTags(),
null,
workSpec.runAttemptCount,
executor,
null,
null));
WorkData workData = new WorkData(worker, workRequest.getWorkSpec());
workDatum.put(worker.getId(), workData);
if (workSpec.initialDelay > 0) {
scheduleWork(id, workData, workSpec.initialDelay);
} else {
workSpec.state = WorkInfo.State.RUNNING;
onWorkChanged(id);
ListenableFuture<ListenableWorker.Result> future = worker.startWork();
future.addListener(new WorkerListener(worker.getId(), future), uiExecutor);
}
} catch (ClassNotFoundException | NoSuchMethodException | InstantiationException | InvocationTargetException | IllegalAccessException e) {
throw new RuntimeException(e);
}
}
class WorkerListener implements Runnable {
private final UUID id;
private final ListenableFuture<ListenableWorker.Result> future;
WorkerListener(UUID id, ListenableFuture<ListenableWorker.Result> future) {
this.id = id;
this.future = future;
}
@Override
public void run() {
WorkData workData = workDatum.get(id);
if (workData == null) return;
try {
ListenableWorker.Result r = future.get();
if (r == null || r instanceof ListenableWorker.Result.Failure) {
workData.spec.state = WorkInfo.State.FAILED;
if (r != null)
workData.spec.output = ((ListenableWorker.Result.Failure) r).getOutputData();
onWorkChanged(workData.worker.getId());
} else if (r instanceof ListenableWorker.Result.Success) {
workData.spec.state = WorkInfo.State.SUCCEEDED;
workData.spec.output = ((ListenableWorker.Result.Success) r).getOutputData();
onWorkChanged(id);
if(workData.spec.isPeriodic())
scheduleWork(id, workData, workData.spec.intervalDuration);
} else if (r instanceof ListenableWorker.Result.Retry) {
workData.spec.state = WorkInfo.State.ENQUEUED;
onWorkChanged(id);
//TODO spec.backoffPolicy
scheduleWork(id, workData, workData.spec.backoffDelayDuration);
}
Log.d(TAG, workData.worker.getClass().getSimpleName() + " " + id + " " + workData.spec.state);
} catch (ExecutionException | InterruptedException e) {
Log.e(TAG, workData.worker.getClass().getSimpleName() + " " + id + " no future");
}
}
}
@Override
public WorkInfo getWorkInfo(String tag) {
WorkData tagWorkData = null;
for (WorkData workData : workDatum.values())
if (workData.tags.contains(tag) && (tagWorkData == null || tagWorkData.time < workData.time))
tagWorkData = workData;
return tagWorkData != null ? fromWorkData(UUID.fromString(tagWorkData.spec.id), tagWorkData) : null;
}
@Override
public LiveData<WorkInfo> getWorkInfoLiveData(UUID id) {
if (BuildConfig.DEBUG && Thread.currentThread() != ui.getLooper().getThread())
throw new AssertionError();
MutableLiveData<WorkInfo> liveData = null;
while (true) {
try {
for (Map.Entry<MutableLiveData<WorkInfo>, UUID> entry : liveIdDatum.entrySet())
if (entry.getValue().equals(id)) {
liveData = entry.getKey();
break;
}
break;
} catch (ConcurrentModificationException e) {
Log.w(TAG, "concurrent 3");
}
}
if (liveData == null) {
liveIdDatum.put(liveData = new MutableLiveData<>(), id);
WorkInfo workInfo = fromWorkData(id, workDatum.get(id));
if (workInfo != null)
liveData.setValue(workInfo);
}
return liveData;
}
@Override
public LiveData<WorkInfo> getWorkInfoLiveData(String tag) {
if (BuildConfig.DEBUG && Thread.currentThread() != ui.getLooper().getThread())
throw new AssertionError();
MutableLiveData<WorkInfo> liveData = null;
while (true) {
try {
for (Map.Entry<MutableLiveData<WorkInfo>, String> entry : liveTagDatum.entrySet())
if (entry.getValue().equals(tag)) {
liveData = entry.getKey();
break;
}
break;
} catch (ConcurrentModificationException e) {
Log.w(TAG, "concurrent 4");
}
}
if (liveData == null) {
liveTagDatum.put(liveData = new MutableLiveData<>(), tag);
WorkInfo workInfo = getWorkInfo(tag);
if (workInfo != null)
liveData.setValue(workInfo);
}
return liveData;
}
@Override
public void cancelWork(UUID id) {
WorkData workData = workDatum.get(id);
if (workData != null && !workData.spec.state.isFinished()) {
if (workData.startLater != null) {
uiRemove(workData.startLater);
workData.startLater = null;
}
workData.worker.stop();
workData.spec.state = WorkInfo.State.CANCELLED;
onWorkChanged(id);
}
}
@Override
public void cancelWork(String tag) {
for (WorkData workData : workDatum.values())
if (workData.tags.contains(tag))
cancelWork(UUID.fromString(workData.spec.id));
}
}
You shouldn't be using WorkManager if you don't need the tasks to persist. You should, instead, find a different solution as per the Background Processing Guide: https://developer.android.com/guide/background/

How to work with Xamarin.Android/iOS/Mono and SPP over Bluetooth?

I need to use SPP over Bluetooth on Xamarin.Android/iOS/Mono. This is the code I'm trying for Xamarin.Android, but the behavior is the same if I go to iOS or Mono on Linux/Mac:
using System;
using System.Collections.Generic;
using System.Text;
using Android.Bluetooth;
using Java.Util;
using System.IO;
using System.Threading;
using PI.SDK.Devices.BC.Responses;
using System.Threading.Tasks;
namespace PI.SDK.Devices.BC
{
public class BluetoothDeviceConnectionChannel : IBCDeviceConnectionChannel
{
private Queue<ResponseBase> _dispatcher;
private bool _abort = false;
private BluetoothAdapter _adapter;
private BluetoothSocket _socket;
private BluetoothDevice _device;
private static UUID _uuid = UUID.FromString("00001101-0000-1000-8000-00805F9B34FB");
private StreamReader _reader;
private StreamWriter _writer;
private string _deviceAddress;
public event Action<string> Notify;
public bool IsOpen { get { return _socket.IsConnected; } }
public BluetoothDeviceConnectionChannel(string deviceAddress)
{
_adapter = BluetoothAdapter.DefaultAdapter;
if (_adapter == null)
throw new PIDeviceManagerException("Bluetooth is not supported on this Android device");
_deviceAddress = deviceAddress;
}
public BluetoothDeviceConnectionChannel(BluetoothDevice device) : this(device.Address) { }
public void Close()
{
_socket.Close();
}
public bool Open()
{
if (!_adapter.IsEnabled)
{
throw new PIDeviceManagerException("Bluetooth is not enabled");
}
_adapter.CancelDiscovery();
_device = _adapter.GetRemoteDevice(_deviceAddress);
_socket = _device.CreateRfcommSocketToServiceRecord(_uuid);
_socket.Connect();
if (_socket.IsConnected)
{
_reader = new StreamReader(_socket.InputStream, Encoding.GetEncoding("Windows-1252"));
_writer = new StreamWriter(_socket.OutputStream, Encoding.GetEncoding("Windows-1252"));
_dispatcher = new Queue<ResponseBase>();
Task.Factory.StartNew(() => ReceiveData());
return true;
}
return false;
}
public void ReceiveData()
{
while (_socket != null && _socket.IsConnected)
{
var data = _reader.ReadToEnd();
if (string.IsNullOrWhiteSpace(data))
continue;
var dataBuffer = data.ToCharArray();
var synBuilder = new StringBuilder();
foreach (var c in dataBuffer)
{
switch (c)
{
case ControlChars.NACK:
case ControlChars.EOT:
#if DEBUG
System.Diagnostics.Debug.WriteLine($"[PINPAD -> APP] {c.ToString().Dump()}");
#endif
_abort = true;
return;
case ControlChars.ACK:
#if DEBUG
System.Diagnostics.Debug.WriteLine($"[PINPAD -> APP] {c.ToString().Dump()}");
#endif
continue;
case ControlChars.SYN:
synBuilder.Append(c);
break;
case ControlChars.ETB:
synBuilder.Append(c);
var cmdResponse = synBuilder.ToString();
#if DEBUG
System.Diagnostics.Debug.WriteLine($"[PINPAD -> APP] {cmdResponse.Dump()}");
#endif
var response = CommandResponseParser.Parse(cmdResponse);
if (response != null)
{
_dispatcher.Enqueue(response);
}
return;
default:
synBuilder.Append(c);
break;
}
}
}
}
public ResponseBase SendData(string data)
{
_abort = false;
try
{
_writer.Write(data);
}
catch
{
throw new PIException("Unable to send data to device");
}
#if DEBUG
System.Diagnostics.Debug.WriteLine($"[APP -> PINPAD] {data.Dump()}");
#endif
if (data[0] == ControlChars.CAN)
{
Thread.Sleep(100);
return null;
}
while (!_abort)
{
if (_dispatcher.Count > 0)
{
var response = _dispatcher.Dequeue();
if (response != null)
{
if (response is PPNotifyResponse)
{
if (Notify != null && Notify.GetInvocationList().Length > 0)
Notify(response.Message);
continue;
}
return response;
}
}
}
throw new InvalidOperationException("invalidData");
}
public ResponseBase SendData(CommandBase data)
{
var cmd = data.ToBCCommandString();
return SendData(cmd);
}
}
}
I want to achieve the same behavior as the code below for Windows, which uses the SerialPort class and a COMxxx port, where that port is nothing more than a serial-over-Bluetooth COM port to the target device.
using PI.SDK.Devices.BC.Responses;
using System;
using System.Collections.Generic;
using System.IO.Ports;
using System.Text;
using System.Threading;
namespace PI.SDK.Devices.BC
{
public class SerialDeviceConnectionChannel : IBCDeviceConnectionChannel
{
private SerialPort _port;
private Queue<ResponseBase> _dispatcher;
private bool _abort = false;
public event Action<string> Notify;
public bool IsOpen { get { return _port.IsOpen; } }
public SerialDeviceConnectionChannel(string port)
{
_port = new SerialPort(port, 19200, Parity.None, 8, StopBits.One);
_port.ReadTimeout = 3 * 1000;
_port.WriteTimeout = 3 * 1000;
_port.Encoding = Encoding.GetEncoding(1252);
_port.DataReceived += DataReceived;
}
public void Close()
{
_port.Close();
}
public bool Open()
{
while (true)
{
try
{
_port.Open();
_port.DiscardInBuffer();
_port.DiscardInBuffer();
break;
}
catch { Console.WriteLine($"Trying to connect to {_port}"); }
}
_dispatcher = new Queue<ResponseBase>();
return _port.IsOpen;
}
private void DataReceived(object sender, SerialDataReceivedEventArgs e)
{
var data = _port.ReadExisting();
var dataBuffer = data.ToCharArray();
var synBuilder = new StringBuilder();
foreach (var c in dataBuffer)
{
switch (c)
{
case ControlChars.NACK:
case ControlChars.EOT:
#if DEBUG
Console.WriteLine($"[PINPAD -> APP] {c.ToString().Dump()}");
#endif
_abort = true;
return;
case ControlChars.ACK:
#if DEBUG
Console.WriteLine($"[PINPAD -> APP] {c.ToString().Dump()}");
#endif
continue;
case ControlChars.SYN:
synBuilder.Append(c);
break;
case ControlChars.ETB:
synBuilder.Append(c);
var cmdResponse = synBuilder.ToString();
#if DEBUG
Console.WriteLine($"[PINPAD -> APP] {cmdResponse.Dump()}");
#endif
var response = CommandResponseParser.Parse(cmdResponse);
if (response != null)
{
_dispatcher.Enqueue(response);
}
return;
default:
synBuilder.Append(c);
break;
}
}
}
public ResponseBase SendData(string data)
{
_abort = false;
try
{
_port.Write(data);
}
catch
{
throw new PIException("Unable to send data to device");
}
#if DEBUG
Console.WriteLine($"[APP -> PINPAD] {data.Dump()}");
#endif
if (data[0] == ControlChars.CAN)
{
Thread.Sleep(100);
return null;
}
while (!_abort)
{
if (_dispatcher.Count > 0)
{
var response = _dispatcher.Dequeue();
if (response != null)
{
if (response is PPNotifyResponse)
{
if (Notify != null && Notify.GetInvocationList().Length > 0)
Notify(response.Message);
continue;
}
return response;
}
}
}
throw new InvalidOperationException("invalidData");
}
public ResponseBase SendData(CommandBase data)
{
var cmd = data.ToBCCommandString();
return SendData(cmd);
}
}
}
This code hangs when calling _reader.ReadToEnd() on every platform except Windows. It looks like I'm not getting the response back somehow.
Note that the Android/iOS/Mono version must respect the serial connection configuration set in the constructor of the Windows class, and the encoding for the messages and the serial communication must be Windows-1252.
Any help pointing out the mistakes, or how to get it working the same way as on Windows, would be appreciated. Since there is no SerialPort class, I'm somewhat lost on those devices, and Bluetooth communication seems to be an obscure topic when it comes to Xamarin/mobile devices.
Thanks!
Best regards,
Gutemberg
Found the problem. While on Windows the calls to serialPort.Read() can be asynchronous and on another thread, on Android/iOS/Mono they can't be.
If I start reading right after the _writer.Write(), in other words on the same thread, it works just fine.
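Not part of the original answer, but a minimal sketch of that same-thread approach, reusing the _writer/_reader fields from the class above; the chunk size and the ETB check are illustrative, not the device's full protocol handling:
public ResponseBase WriteThenRead(string data)
{
    _writer.Write(data);
    _writer.Flush();

    // Read on the same thread, in chunks, instead of ReadToEnd(),
    // which only returns once the remote side closes the stream.
    var buffer = new char[1024];
    var builder = new StringBuilder();
    int read;
    while ((read = _reader.Read(buffer, 0, buffer.Length)) > 0)
    {
        builder.Append(buffer, 0, read);
        if (builder.ToString().IndexOf(ControlChars.ETB) >= 0)
            break; // a complete frame (SYN..ETB) has arrived
    }
    return CommandResponseParser.Parse(builder.ToString());
}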

Setting a non-English-language password on an Android phone?

With reference to this question on the Android Stack Exchange, I need a solution that allows an Android phone to support setting a non-English-language password.
My phone's source is based on stock Android, which does not allow me to set a password made of non-ASCII characters, such as Hebrew.
Based on the AOSP source code that handles password input for the lock screen, ChooseLockPassword.java, inside validatePassword() (line 292), here is the snippet that shows the "illegal character" message (from line 311):
// allow non control Latin-1 characters only
if (c < 32 || c > 127) {
return getString(R.string.lockpassword_illegal_character);
}
I have commented out this part, but I don't think this will work. [Waiting to be flashed]
There is no existing question for this situation; I need help working out whether this is possible, and any workaround will also do.
After fighting with this for a few days, I got a workaround by implementing my own method for it.
private String validateHebrewPassword(String password)
{
if (password.length() < mPasswordMinLength) {
return getString(mIsAlphaMode ?
R.string.lockpassword_password_too_short
: R.string.lockpassword_pin_too_short, mPasswordMinLength);
}
if (password.length() > mPasswordMaxLength) {
return getString(mIsAlphaMode ?
R.string.lockpassword_password_too_long
: R.string.lockpassword_pin_too_long, mPasswordMaxLength + 1);
}
for (int i = 0; i < password.length(); i++)
{
char c = password.charAt(i);
System.out.println("Validate Hebrew Password Success "+ " Char "+c+" for password "+password+ " langauage "+locale);
}
return null;
}
And modifying the validatePassword() caller to be Hebrew-specific, like:
private void handleNext() {
final String pin = mPasswordEntry.getText().toString();
if (TextUtils.isEmpty(pin)) {
return;
}
String errorMsg = null;
if (mUiStage == Stage.Introduction)
{
String locale = java.util.Locale.getDefault().getLanguage();
if(locale.equals("iw")) //Specific Hebrew check
{
errorMsg = validateHebrewPassword(pin); //New Method
}
else
{
errorMsg = validatePassword(pin); //AOSP Method
}
if (errorMsg == null)
{
mFirstPin = pin;
mPasswordEntry.setText("");
updateStage(Stage.NeedToConfirm);
}
} else if (mUiStage == Stage.NeedToConfirm) {
if (mFirstPin.equals(pin)) {
final boolean isFallback = getActivity().getIntent().getBooleanExtra(
LockPatternUtils.LOCKSCREEN_BIOMETRIC_WEAK_FALLBACK, false);
mLockPatternUtils.clearLock(isFallback);
mLockPatternUtils.saveLockPassword(pin, mRequestedQuality, isFallback);
getActivity().setResult(RESULT_FINISHED);
getActivity().finish();
} else {
CharSequence tmp = mPasswordEntry.getText();
if (tmp != null) {
Selection.setSelection((Spannable) tmp, 0, tmp.length());
}
updateStage(Stage.ConfirmWrong);
}
}
if (errorMsg != null) {
showError(errorMsg, mUiStage);
}
}
private void updateUi() {
String password = mPasswordEntry.getText().toString();
final int length = password.length();
if (mUiStage == Stage.Introduction && length > 0) {
if (length < mPasswordMinLength) {
String msg = getString(mIsAlphaMode ? R.string.lockpassword_password_too_short
: R.string.lockpassword_pin_too_short, mPasswordMinLength);
mHeaderText.setText(msg);
mNextButton.setEnabled(false);
} else
{
String locale = java.util.Locale.getDefault().getLanguage();
String error = null;
if(locale.equals("iw")) //Specific Hebrew check
{
error = validateHebrewPassword(password); //New method
}
else
{
error = validatePassword(password); //AOSP Method
}
if (error != null) {
mHeaderText.setText(error);
mNextButton.setEnabled(false);
} else {
mHeaderText.setText(R.string.lockpassword_press_continue);
mNextButton.setEnabled(true);
}
}
} else {
mHeaderText.setText(mIsAlphaMode ? mUiStage.alphaHint : mUiStage.numericHint);
mNextButton.setEnabled(length > 0);
}
mNextButton.setText(mUiStage.buttonText);
}

Using Mp4Parser, if I append more videos, the audio stream gets out of sync

As I said, the more videos I append, the more the audio gets out of sync. How can I fix this? I have the following code to append the videos:
public class ConcatenateVideos extends ExecutorAsyncTask<String, Void, Boolean> {
private ArrayList<String> video_urls = null;
private final String TAG = ConcatenateVideos.class.getSimpleName();
public void setUris(ArrayList<String> videos) {
LogService.log(TAG, "set uris");
if (videos != null) {
video_urls = videos;
this.execute();
}
}
@Override
protected Boolean doInBackground(String... params) {
boolean success = false;
FileInputStream[] videos = new FileInputStream[video_urls.size()];
try {
for (int i = 0; i < video_urls.size(); i++) {
videos[i] = new FileInputStream(video_urls.get(i));
}
success = concatenateVideos(videos);
} catch (Exception e) {
success = false;
LogService.err(TAG, e.getMessage(), e);
}
return success;
}
private boolean concatenateVideos(InputStream[] video_streams) {
boolean success = false;
Movie[] inMovies = new Movie[video_streams.length];
FileChannel fc = null;
Movie result = new Movie();
IsoFile out = null;
try {
for (int i = 0; i < inMovies.length; i++) {
if (video_streams[i] != null) {
inMovies[i] = MovieCreator.build(Channels.newChannel(video_streams[i]));
}
}
List<Track> videoTracks = new LinkedList<Track>();
List<Track> audioTracks = new LinkedList<Track>();
for (Movie m : inMovies) {
for (Track t : m.getTracks()) {
if (t.getHandler().equals("soun")) {
audioTracks.add(t);
}
if (t.getHandler().equals("vide")) {
videoTracks.add(t);
}
}
}
if (audioTracks.size() > 0) {
result.addTrack(new AppendTrack(audioTracks.toArray(new Track[audioTracks.size()])));
}
if (videoTracks.size() > 0) {
result.addTrack(new AppendTrack(videoTracks.toArray(new Track[videoTracks.size()])));
}
out = new DefaultMp4Builder().build(result);
fc = new RandomAccessFile(video_urls.get(0), "rw").getChannel();
for (int i = 1; i < video_urls.size(); i++) {
File f = new File(video_urls.get(i));
LogService.log(TAG, "delete file : " + f.delete());
}
success = true;
} catch (Exception e) {
LogService.err(TAG, e.getMessage(), e);
success = false;
} finally {
try {
LogService.log(TAG, "==========finally");
if (fc != null) {
fc.position(0);
out.getBox(fc);
fc.close();
}
} catch (Exception e) {
LogService.err(TAG, e.getMessage(), e);
}
}
return success;
}
}
And this is the Service I use to call this ConcatenateVideos function:
private final String TAG = ConcatenateVideosService.class.getSimpleName();
final Messenger myMessenger = new Messenger(new IncomingHandler());
class IncomingHandler extends Handler {
private Messenger client = null;
@Override
public void handleMessage(Message msg) {
// init messenger
if (client == null) {
client = msg.replyTo;
}
// get the message
Bundle data = msg.getData();
byte dataString = data.getByte("message");
switch (dataString) {
case Constants.INIT_CMD_SERVICE:
LogService.log(TAG, "INIT_CMD_SERVICE:");
break;
case Constants.CONCATE_CMD_SERVICE:
LogService.log(TAG, "CONCATE_CMD_SERVICE:");
ArrayList<String> videos = data.getStringArrayList(Constants.SERVICE_VIDEO_URLS);
ConcatenateVideos concatenateVideos = new ConcatenateVideos() {
@Override
protected void onPostExecute(Boolean result) {
LogService.log(TAG, "onPostExecute() , result : " + result);
super.onPostExecute(result);
// setup the answer
Message answer = Message.obtain();
Bundle bundle = new Bundle();
bundle.putBoolean("result", result);
answer.setData(bundle);
// send the answer
try {
client.send(answer);
} catch (RemoteException e) {
LogService.err(TAG, e.getMessage(), e);
}
}
};
concatenateVideos.setUris(videos);
break;
}
}
}
@Override
public boolean onUnbind(Intent intent) {
stopSelf();
return super.onUnbind(intent);
}
@Override
public IBinder onBind(Intent intent) {
return myMessenger.getBinder();
}
@Override
public void onDestroy() {
super.onDestroy();
}
My videos are recorded at the following quality: video bitrate 800000, audio bitrate 64000, audio sampling rate 44100, MPEG-4 container, H264 video, AAC audio, at 30 fps.
I ran a test: if I record 4 videos, the video timescale is 90000 and the audio timescale is 44100 for every video.
But after appending the videos, the audio timescale is still 44100, while the video timescale is 900. Why does the video timescale change and not the audio one?
In many cases the recordings (audio/video) have different lengths. Let's say the audio recordings are always 10.0 s and the video is always 10.1 s. If you just play one such movie, the audio may end before the video; the remainder is effectively silent.
If you append two of these videos, the first audio track starts at 0 s and the second at 10.0 s; unfortunately the second video starts at 10.1 s, and voilà, you have a sync problem.
You will need to compensate for the different run lengths by appending silence or even by dropping some frames.
I know this question is old, but I faced the same problem without finding a clear solution, and by taking code from here and there I put together a couple of functions to solve it.
@Throws(Exception::class)
fun appendVideos(videoPathList: List<String>, targetFilePath: String) {
val movies = videoPathList.flatMap { file -> listOf(MovieCreator.build(file)) }
val finalMovie = Movie()
val videoTracksTotal = mutableListOf<Track>()
val audioTracksTotal = mutableListOf<Track>()
var audioDuration = 0.0
var videoDuration = 0.0
movies.forEach { movie ->
val videoTracks = mutableListOf<Track>()
val audioTracks = mutableListOf<Track>()
movie.tracks.forEach { track ->
val trackDuration = track.sampleDurations.toList()
.map { t -> t.toDouble() / track.trackMetaData.timescale }.sum()
if (track.handler == "vide") {
videoDuration += trackDuration
videoTracks.add(track)
} else if (track.handler == "soun") {
audioDuration += trackDuration
audioTracks.add(track)
}
}
// Adjusting Durations
adjustDurations(videoTracks, audioTracks, videoDuration, audioDuration).let {
audioDuration = it.audioDuration
videoDuration = it.videoDuration
}
videoTracksTotal.addAll(videoTracks)
audioTracksTotal.addAll(audioTracks)
}
if (videoTracksTotal.isNotEmpty() && audioTracksTotal.isNotEmpty()) {
finalMovie.addTrack(AppendTrack(*videoTracksTotal.toTypedArray()))
finalMovie.addTrack(AppendTrack(*audioTracksTotal.toTypedArray()))
}
val container = DefaultMp4Builder().build(finalMovie)
val fos = FileOutputStream(targetFilePath)
val bb = Channels.newChannel(fos)
container.writeContainer(bb)
fos.close()
}
class Durations(val audioDuration: Double, val videoDuration: Double)
private fun adjustDurations(
videoTracks: MutableList<Track>,
audioTracks: MutableList<Track>,
videoDuration: Double,
audioDuration: Double
): Durations {
var diff = audioDuration - videoDuration
val tracks: MutableList<Track>
var durationOperator: Double
val isAudioProblem: Boolean
when {
// audio and video match, no operations to perform
diff == 0.0 -> {
return Durations(audioDuration, videoDuration)
}
// audio tracks are longer than video
diff > 0 -> {
tracks = audioTracks
durationOperator = audioDuration
isAudioProblem = true
}
// video tracks are longer than audio
else -> {
tracks = videoTracks
durationOperator = videoDuration
diff *= -1.0
isAudioProblem = false
}
}
// Getting the last track in order to operate with it
var track: Track = tracks.last()
var counter: Long = 0
// Reversing SampleDuration list
track.sampleDurations.toList().asReversed().forEach { sampleDuration ->
// Calculating how much this track need to be re-adjusted
if (sampleDuration.toDouble() / track.trackMetaData.timescale > diff) {
return@forEach
}
diff -= sampleDuration.toDouble() / track.trackMetaData.timescale
durationOperator -= sampleDuration.toDouble() / track.trackMetaData.timescale
counter++
}
if (counter != 0L) {
// Cropping track
track = CroppedTrack(track, 0, track.samples.size - counter)
//update the original reference
tracks.removeAt(tracks.lastIndex)
tracks.add(track)
}
// Returning durations
return if (isAudioProblem) {
Durations(durationOperator, videoDuration)
} else {
Durations(audioDuration, durationOperator)
}
}
