Android TextToSpeech in Chrome ARC - android

I'm trying to port my android app to Chrome using ARCWelder. The TextToSpeech component doesn't seem to work. In one activity I have an indeterminate progress circle waiting until the TTS engine is initialized. On Chrome, it either spins forever or returns a NullPointerException. Is TTS not available in Chrome? Running ChromeOS on a Chromebox.
UtteranceProgressListener ttsListener = new UtteranceProgressListener() {
#Override
public void onStart(String s) {
Logg.d("speech started: " + s);
if (loadingDialog.isShowing()) {
loadingDialog.dismiss();
}
}
#Override
public void onDone(String s) {
Logg.d("speech done: " + s);
if (s.equals("1")) {
nextWord();
} else if (s.equals("2")) {
CheckLeave();
}
}
#Override
public void onError(String s) {
Logg.e("Text to Speech error speaking: " + s);
}
};
#Override
protected void onCreate(Bundle savedInstanceState) {
showProgressDialog();
}
#Override
protected void onResume() {
if (tts==null) {
Logg.d("re-initializing TTS");
tts=new TextToSpeech(getApplicationContext(),
new TextToSpeech.OnInitListener() {
#Override
public void onInit(int status) {
if(status != TextToSpeech.ERROR){
tts.setSpeechRate(.5f + .25f * (Integer)KVDB.GetValue("speechRate",2));
tts.setLanguage(Locale.US);
if (pauseTime != 0) {
//Paused. Say nothing.
} else if (currentWord == null) {
startTime = new ExcelDate();
nextWord();
} else if (currentWord.length() == 0) {
nextWord();
} else {
reSpeak();
}
}
}
});
tts.setOnUtteranceProgressListener(ttsListener);
}
super.onResume();
}
// Displays an indeterminate spinner while the TTS engine initializes.
private void showProgressDialog() {
    final ProgressDialog dialog = new ProgressDialog(this);
    dialog.setProgressStyle(ProgressDialog.STYLE_SPINNER);
    dialog.setTitle(getString(R.string.test_loading_msg));
    loadingDialog = dialog;
    loadingDialog.show();
}

As you found, it does look like we don't have any kind of default TTS service provider as part of the ARC package. There is not one provided by the base Android OS at all.
Please feel free to file a bug for it.

Related

synthesizeToFile failed in Android

TextToSpeech is initialized like this in onCreate()
tts = new TextToSpeech(this, this);
onInit coded like this.
#Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
#Override
public void onStart(String s) {
Log.v(TAG, "onStart : " + s);
}
#Override
public void onDone(String s) {
tts.setLanguage(Locale.US);
fabSpeak.setEnabled(true);
Log.v(TAG, "Proceed");
}
#Override
public void onError(String s) {
Log.v(TAG, "onError : " + s);
}
});
Log.v(TAG, "Proceed2");
} else {
Log.e(TAG, "Initilization Failed!");
}
}
Only Proceed2 is printed. Proceed is printed only when I call
tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, "1");
If I call synthesizeToFile without calling speak, like this
int test = tts.synthesizeToFile(text, null, file, "tts");
if (test == TextToSpeech.SUCCESS) {
Log.v(TAG, "Success");
}
This log prints Success, but the file is empty. If I synthesize the file after calling speak, then the file has data.
But I want to use synthesizeToFile without calling speak. I don't know what is wrong here.

Multiple Instances for TextToSpeech

I need to speak multiple language. I created an array for TextToSpeech.
// One engine per language, created up front so each can announce readiness in
// its own locale as soon as it initializes.
// NOTE(review): every onInit below ignores the status argument; guard with
// status == TextToSpeech.SUCCESS before calling setLanguage()/speak().
private TextToSpeech[] mTextSpeechs_;
mTextSpeechs_ = new TextToSpeech[5];
mTextSpeechs_[0] = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        mTextSpeechs_[0].setLanguage(Locale.CHINESE);
        mTextSpeechs_[0].speak(getString(R.string.string_main_chineseready), TextToSpeech.QUEUE_FLUSH, null, "Display");
    }
});
mTextSpeechs_[1] = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        mTextSpeechs_[1].setLanguage(Locale.forLanguageTag("yue-HK"));
        mTextSpeechs_[1].speak(getString(R.string.string_main_hongkong), TextToSpeech.QUEUE_FLUSH, null, "Display");
    }
});
mTextSpeechs_[2] = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        mTextSpeechs_[2].setLanguage(Locale.JAPAN);
        mTextSpeechs_[2].speak(getString(R.string.string_main_japan), TextToSpeech.QUEUE_FLUSH, null, "Display");
    }
});
mTextSpeechs_[3] = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        mTextSpeechs_[3].setLanguage(Locale.KOREA);
        mTextSpeechs_[3].speak(getString(R.string.string_main_korea), TextToSpeech.QUEUE_FLUSH, null, "Display");
    }
});
mTextSpeechs_[4] = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        mTextSpeechs_[4].setLanguage(Locale.ENGLISH);
        mTextSpeechs_[4].speak(getString(R.string.string_main_english), TextToSpeech.QUEUE_FLUSH, null, "Display");
    }
});
....
//type 0 flush
//type 1 add
/**
 * Speaks {@code text} with the engine for the given language index.
 *
 * @param text    text to speak
 * @param type    0 = flush the queue first, 1 = append to the queue
 * @param langidx index into mTextSpeechs_
 */
public void speakMultiLanguage(String text, int type, int langidx) {
    // Bug fix: the body referenced "langidx_" while the parameter is
    // "langidx" - the parameter was silently ignored (or did not compile).
    if (type == 0) {
        mTextSpeechs_[langidx].speak(text, TextToSpeech.QUEUE_FLUSH, null, "Display");
    } else if (type == 1) {
        mTextSpeechs_[langidx].speak(text, TextToSpeech.QUEUE_ADD, null, "Display");
    }
}
Now, when I call the speakMultiLanguage function to speak a specified language, it delays about 5 seconds before speaking. If the language is the same as the last one used, there is no delay. Can anyone give me a solution to fix the delay?
I haven't tested your particular case of using multiple TTS instances in an array, but it looks unproductive to me, especially in terms of memory and resource usage.
In my case I initialise only one TTS object in the onCreate() method of my Activity, and when I need to change the language, I use tts.setLanguage(Locale).
In my case, and with my virtual device, this solution works immediately.
initialise:
#Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
tts = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
#Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS){
tts.setLanguage(Locale.ENGLISH);
}else{
Log.e(TAG, "TTS fault");
}
}
});
//....
}
Change:
#RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
private void prepareTTS(Locale newLocale){
if (!tts.getVoice().getLocale().getISO3Language().equals(newLocale.getISO3Language())) {
tts.setLanguage(newLocale);
Log.d(TAG, "ChangeTo: " + newLocale.getISO3Language());
}else{
Log.d(TAG, "The same");
}
}
speech:
#TargetApi(Build.VERSION_CODES.LOLLIPOP)
private void ttsSpeak21(String text){
String utteranceId = this.hashCode() + "";
int result = tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, utteranceId);
if (result == TextToSpeech.ERROR){
Log.d(TAG, "Can't say");
}else {
Log.d(TAG, lang + "speaking!");
}
}
free resources:
#Override
protected void onDestroy() {
if (tts != null){
tts.stop();
tts.shutdown();
}
super.onDestroy();
}

android pocketsphinx not sending results or partial results

See code below. Pocketsphinx is configured with a keyphrase search to trigger on the word "record". Searching is then started, and talking causes onBeginningOfSpeech and onEndOfSpeech to be called, but no other listener methods get called, whatever I say.
public class MainActivity extends AppCompatActivity implements RecognitionListener {
private final Handler handler = new Handler ();
private SpeechRecognizer recognizer;
private final static String KEYWORD = "record";
#Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_start);
makeButtonStartButton ();
ensureRecordAudioPermission ();
startKeywordListener ();
}
private void startKeywordListener() {
// Recognizer initialization is a time-consuming and it involves IO,
// so we execute it in async task
new AsyncTask<Void, Void, Exception>() {
#Override
protected Exception doInBackground(Void... params) {
try {
Assets assets = new Assets(MainActivity.this);
File assetDir = assets.syncAssets();
setupRecognizer(assetDir);
} catch (IOException e) {
return e;
}
return null;
}
#Override
protected void onPostExecute(Exception result) {
if (result != null) {
Log.e("MainActivity", "Failed to init recognizer " + result);
} else {
startKeywordSearch ();
}
}
}.execute();
}
private void startKeywordSearch() {
Log.i("MainActivity", "Starting keyword search: " + KEYWORD);
recognizer.stop();
recognizer.startListening(KEYWORD);
}
private void setupRecognizer(File assetsDir) throws IOException {
// The recognizer can be configured to perform multiple searches
// of different kind and switch between them
recognizer = SpeechRecognizerSetup.defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
.setRawLogDir(assetsDir) // To disable logging of raw audio comment out this call (takes a lot of space on the device)
.getRecognizer();
recognizer.addListener(this);
// Create keyword-activation search.
recognizer.addKeyphraseSearch(KEYWORD, KEYWORD);
}
private void ensureRecordAudioPermission() {
// Check if user has given permission to record audio
int permissionCheck = ContextCompat.checkSelfPermission(getApplicationContext(), Manifest.permission.RECORD_AUDIO);
if (permissionCheck == PackageManager.PERMISSION_DENIED) {
ActivityCompat.requestPermissions(this, new String[]{Manifest.permission.RECORD_AUDIO}, 1);
return;
}
}
#Override
public void onRequestPermissionsResult(int requestCode,
String[] permissions, int[] grantResults) {
super.onRequestPermissionsResult(requestCode, permissions, grantResults);
if (requestCode == 1) {
if (grantResults.length > 0 && grantResults[0] == PackageManager.PERMISSION_GRANTED) {
startKeywordListener();
} else {
finish();
}
}
}
#Override
public void onDestroy() {
super.onDestroy();
if (recognizer != null) {
recognizer.cancel();
recognizer.shutdown();
}
}
private void makeButtonStartButton() {
findViewById(R.id.startButton).setOnClickListener(startRecording);
}
private final View.OnClickListener startRecording = new View.OnClickListener() {
#Override
public void onClick(View v) {
startActivity(new Intent(MainActivity.this, RecordingActivity.class));
}
};
#Override
public void onBeginningOfSpeech() {
Log.i ("MainActivity", "Beginning of speech detected");
}
#Override
public void onEndOfSpeech() {
Log.i ("MainActivity", "End of speech detected");
}
#Override
public void onPartialResult(Hypothesis hypothesis) {
if (hypothesis == null) return; // reject the null hypothesis (!)
Log.i ("MainActivity", "Partial result: " + hypothesis.getHypstr() + " (" + hypothesis.getProb() + ")");
if (hypothesis.getHypstr().equals(KEYWORD))
startRecording.onClick(null);
}
#Override
public void onResult(Hypothesis hypothesis) {
if (hypothesis == null) return; // reject the null hypothesis (!)
Log.i ("MainActivity", "Complete result: " + hypothesis.getHypstr() + " (" + hypothesis.getProb() + ")");
if (hypothesis.getHypstr().equals(KEYWORD))
startRecording.onClick(null);
}
#Override
public void onError(Exception e) {
Log.i ("MainActivity", "Error detected", e);
}
#Override
public void onTimeout() {
Log.i ("MainActivity", "Timeout occurred");
}
}

Text to Speech is not working in the activity (android)

I am developing an activity that reads the balance to the user. When the user enters the Balance activity it should read the balance immediately. But that's not working for me; here is the code:
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_balance);
EditText editTextTotalBalance= (EditText) findViewById(R.id.editTextBalanceNow);
new Thread(new Runnable() {
public void run() {
myTTS=new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
#Override
public void onInit(int status)
{
if(status != TextToSpeech.ERROR)
{
myTTS.setLanguage(Locale.UK);
}
}
});
}
}).start();
// take the string from the edit text to read the balance
String speech=editTextTotalBalance.getText().toString();
if(Build.VERSION.SDK_INT >= Build.VERSION_CODES.LOLLIPOP)
{
ttsGreater21(speech);
}
else {
ttsUnder20(speech);
}
}// end of onCreate method
#SuppressWarnings("deprecation")
private void ttsUnder20(final String speech) {
Handler mainHandler = new Handler(Looper.getMainLooper());
Runnable myRunnable = new Runnable() {
#Override
public void run() {
try {
HashMap<String, String> map = new HashMap<>();
map.put(TextToSpeech.Engine.KEY_PARAM_UTTERANCE_ID, "MessageId");
myTTS.speak(speech, TextToSpeech.QUEUE_FLUSH, map);
}
catch(Exception excep){
}
}
};
mainHandler.post(myRunnable);
}
#TargetApi(Build.VERSION_CODES.LOLLIPOP)
private void ttsGreater21(final String speech) {
Handler mainHandler = new Handler(Looper.getMainLooper());
Runnable myRunnable = new Runnable() {
#Override
public void run() {
try {
String utteranceId=this.hashCode() + "";
myTTS.speak(speech, TextToSpeech.QUEUE_FLUSH, null, utteranceId);
}
catch(Exception excep){
}
}
};
mainHandler.post(myRunnable);
}
@Override
public void onPause() {
    // Release the engine when the activity leaves the foreground.
    // NOTE(review): myTTS is not nulled out here, and onCreate will not run
    // again on a simple resume - confirm speech is not needed after a pause.
    if (myTTS != null) {
        myTTS.stop();
        myTTS.shutdown();
    }
    super.onPause();
}
Also, it shows me an error that says Skipped 81 frames! The application may be doing too much work on its main thread. — that is why I added several threads to my code.
Also, I check the API version because when I implemented it without the check, I found that some smartphones did not handle the speech well and it crashed the app.
Thank you; looking forward to your help.

Handling Errors in PocketSphinx Android app

I am using the default dictionary that comes with the pocketsphinx demo, which is good for my purposes. When a user enters a phrase, the app starts keyphrase listening, but if a word is not found in the dictionary the app crashes. The app crashes in onError() within a service. How is the error handling done? Is there any way I can catch the error? Overall I would just like the service to call stopSelf() when an error happens so the main activity won't crash as well.
Errors:
ERROR: "kws_search.c", line 165: The word 'phonez' is missing in the dictionary
Fatal signal 11 (SIGSEGV) at 0x00000000 (code=1), thread 5389 (1994.wherephone)
Here is my service class:
public class WherePhoneService extends Service implements RecognitionListener {
private static String SettingStorage = "SavedData";
SharedPreferences settingData;
private SpeechRecognizer recognizer;
private String sInput;
private String sOutput;
private int seekVal;
private TextToSpeech reply;
private AsyncTask t;
public WherePhoneService() {
}
#Override
public IBinder onBind(Intent intent) {
// TODO: Return the communication channel to the service.
throw new UnsupportedOperationException("Not yet implemented");
}
#Override
public int onStartCommand(Intent intent, int flags, int startId) {
makeText(getApplicationContext(), "onHandle start", Toast.LENGTH_SHORT).show();
getValues();
startTTS();
t = new AsyncTask<Void, Void, Exception>() {
#Override
protected Exception doInBackground(Void... params) {
try {
Assets assets = new Assets(WherePhoneService.this);
File assetDir = assets.syncAssets();
setupRecognizer(assetDir);
} catch (IOException e) {
return e;
}
return null;
}
#Override
protected void onPostExecute(Exception result) {
if (result != null) {
//((TextView) findViewById(R.id.caption_text)).setText("Failed to init recognizer " + result);
} else {
switchSearch(sInput);
}
}
}.execute();
return Service.START_STICKY;
}
private void setupRecognizer(File assetsDir) throws IOException {
recognizer = defaultSetup()
.setAcousticModel(new File(assetsDir, "en-us-ptm"))
.setDictionary(new File(assetsDir, "cmudict-en-us.dict"))
// To disable logging of raw audio comment out this call (takes a lot of space on the device)
//.setRawLogDir(assetsDir)
// Threshold to tune for keyphrase to balance between false alarms and misses
.setKeywordThreshold(1e-45f)
// Use context-independent phonetic search, context-dependent is too slow for mobile
.setBoolean("-allphone_ci", true)
.getRecognizer();
recognizer.addListener(this);
// Create keyword-activation search.
recognizer.addKeyphraseSearch(sInput, sInput);
}
private void switchSearch(String searchName) {
recognizer.stop();
// If we are not spotting, start listening with timeout (10000 ms or 10 seconds).
if (searchName.equals(sInput))
recognizer.startListening(searchName);
else
recognizer.startListening(searchName, 10000);
}
#Override
public void onBeginningOfSpeech() {
}
#Override
public void onEndOfSpeech() {
if (!recognizer.getSearchName().equals(sInput))
switchSearch(sInput);
}
#Override
public void onPartialResult(Hypothesis hypothesis) {
if (hypothesis == null)
return;
String text = hypothesis.getHypstr();
makeText(getApplicationContext(), "Partial", Toast.LENGTH_SHORT).show();
if (text.equals(sInput)) {
setVolume();
// Text to speech
reply.speak(sOutput, TextToSpeech.QUEUE_ADD, null);
switchSearch(sInput);
}
else {
makeText(getApplicationContext(), "Try again", Toast.LENGTH_SHORT).show();
switchSearch(sInput);
}
}
#Override
public void onResult(Hypothesis hypothesis) {
if (hypothesis != null) {
// restart listener and affirm that partial has past
makeText(getApplicationContext(), "end", Toast.LENGTH_SHORT).show();
//recognizer.startListening(sInput);
switchSearch(sInput);
}
}
public void onError(Exception e) {
e.printStackTrace(); // not all Android versions will print the stack trace automatically
Intent intent = new Intent ();
intent.setAction ("com.mydomain.SEND_LOG"); // see step 5.
intent.setFlags (Intent.FLAG_ACTIVITY_NEW_TASK); // required when starting from Application
startActivity (intent);
stopSelf();
}
#Override
public void onTimeout() {
switchSearch(sInput);
}
public void startTTS() {
reply = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
#Override
public void onInit(int status) {
if(status != TextToSpeech.ERROR){
reply.setLanguage(Locale.UK);
}
}
});
}
public void getValues() {
settingData = getBaseContext().getSharedPreferences(SettingStorage, 0);
sInput = settingData.getString("inputstring", "Where is my phone").toString().toLowerCase().replaceAll("[^\\w\\s]", "");
sOutput = settingData.getString("outputstring", "").toString().toLowerCase();
seekVal = settingData.getInt("seekval", 0);
}
public void setVolume() {
int seekValConvert = 0;
AudioManager audioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
int getMaxPhoneVol = audioManager.getStreamMaxVolume(audioManager.STREAM_MUSIC);
seekValConvert = ((seekVal * getMaxPhoneVol)/100);
audioManager.setStreamVolume(audioManager.STREAM_MUSIC, seekValConvert, 0);
}
#Override
public void onDestroy() {
super.onDestroy();
makeText(getApplicationContext(), "destroy", Toast.LENGTH_SHORT).show();
recognizer.cancel();
recognizer.shutdown();
t.cancel(true);
}
}
Crash is a bug in pocketsphinx-android. If you update to latest version from github, it should properly throw RuntimeException on any errors in methods addKeyphrase and setSearch.

Categories

Resources