Multiple Instances for TextToSpeech - android

I need to speak multiple languages, so I created an array of TextToSpeech instances.
private TextToSpeech[] mTextSpeechs_;
mTextSpeechs_ = new TextToSpeech[5];
mTextSpeechs_[0] = new TextToSpeech(this, new TextToSpeech.OnInitListener()
{
@Override
public void onInit(int status)
{
mTextSpeechs_[0].setLanguage(Locale.CHINESE);
mTextSpeechs_[0].speak(getString(R.string.string_main_chineseready), TextToSpeech.QUEUE_FLUSH, null,"Display");
}
});
mTextSpeechs_[1] = new TextToSpeech(this, new TextToSpeech.OnInitListener()
{
@Override
public void onInit(int status)
{
mTextSpeechs_[1].setLanguage(Locale.forLanguageTag("yue-HK"));
mTextSpeechs_[1].speak(getString(R.string.string_main_hongkong), TextToSpeech.QUEUE_FLUSH, null,"Display");
}
});
mTextSpeechs_[2] = new TextToSpeech(this, new TextToSpeech.OnInitListener()
{
@Override
public void onInit(int status)
{
mTextSpeechs_[2].setLanguage(Locale.JAPAN);
mTextSpeechs_[2].speak(getString(R.string.string_main_japan), TextToSpeech.QUEUE_FLUSH, null,"Display");
}
});
mTextSpeechs_[3] = new TextToSpeech(this, new TextToSpeech.OnInitListener()
{
@Override
public void onInit(int status)
{
mTextSpeechs_[3].setLanguage(Locale.KOREA);
mTextSpeechs_[3].speak(getString(R.string.string_main_korea), TextToSpeech.QUEUE_FLUSH, null,"Display");
}
});
mTextSpeechs_[4] = new TextToSpeech(this, new TextToSpeech.OnInitListener()
{
@Override
public void onInit(int status)
{
mTextSpeechs_[4].setLanguage(Locale.ENGLISH);
mTextSpeechs_[4].speak(getString(R.string.string_main_english), TextToSpeech.QUEUE_FLUSH, null,"Display");
}
});
....
//type 0 flush
//type 1 add
public void speakMultiLanguage(String text, int type, int langidx)
{
if( type == 0 )
mTextSpeechs_[langidx].speak(text, TextToSpeech.QUEUE_FLUSH, null,"Display");
else if( type == 1)
mTextSpeechs_[langidx].speak(text, TextToSpeech.QUEUE_ADD, null,"Display");
}
Now, when I call the speakMultiLanguage function to speak a specified language, it delays about 5 seconds before speaking. If the language is the same as the last one used, there is no delay. Can anyone suggest a solution to eliminate the delay?

I haven't tested your particular case of keeping multiple TTS instances in an array, but to me it looks unproductive, especially in terms of memory and resource usage.
In my case I initialise only one TTS object in the onCreate() method of my Activity, and when I need to change the language I call tts.setLanguage(Locale).
In my case, on my virtual device, this works immediately.
initialise:
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
tts = new TextToSpeech(this, new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS){
tts.setLanguage(Locale.ENGLISH);
}else{
Log.e(TAG, "TTS fault");
}
}
});
//....
}
Change:
@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
private void prepareTTS(Locale newLocale){
if (!tts.getVoice().getLocale().getISO3Language().equals(newLocale.getISO3Language())) {
tts.setLanguage(newLocale);
Log.d(TAG, "ChangeTo: " + newLocale.getISO3Language());
}else{
Log.d(TAG, "The same");
}
}
speech:
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
private void ttsSpeak21(String text){
String utteranceId = this.hashCode() + "";
int result = tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, utteranceId);
if (result == TextToSpeech.ERROR){
Log.d(TAG, "Can't say");
}else {
Log.d(TAG, lang + "speaking!");
}
}
free resources:
@Override
protected void onDestroy() {
if (tts != null){
tts.stop();
tts.shutdown();
}
super.onDestroy();
}
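To tie the two helpers together, here is a minimal sketch (my own, not part of the original answer; the method name speakInLanguage is made up) of how a single call could switch the language and then speak:
@RequiresApi(api = Build.VERSION_CODES.LOLLIPOP)
private void speakInLanguage(String text, Locale locale) {
    // Only switch the engine's language when the requested locale differs from the current voice.
    prepareTTS(locale);
    // Speak with QUEUE_FLUSH semantics via the helper above.
    ttsSpeak21(text);
}
For example, speakInLanguage(getString(R.string.string_main_japan), Locale.JAPAN); would replace the mTextSpeechs_[2].speak(...) call from the question.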

Related

Restart SpeechRecognizer using onUtteranceProgressListener

I've been working on this code for weeks with no progress at all, even though it seems like such an easy fix! Basically I am using TextToSpeech together with SpeechRecognizer so that the app asks me a question and then listens for my answer. Here's my code... I'm thinking it's a thread problem or something.
When I hold down the button, SpeechRecognizer starts listening until I take my finger away; this works fine. But I want the SpeechRecognizer to start up again programmatically after the utterance is completed, or to simulate a button press to get it to work, and neither approach works...
public class MainActivity extends AppCompatActivity {
TextToSpeech textToSpeech;
// SpeechRecognizer
SpeechRecognizer mSpeechRecognizer;
Intent mSpeechRecognizerIntent;
// Speech Analysis Support
SpeechToTextAnalyzer speechToTextAnalyzer;
String speechToTextAnalyzerResponse;
String SPEECH_RESPONSE_BACK;
int buttonPressCounter;
boolean responsive;
TextView StatusDisplay, WordingDisplay;
Button SimulateButton;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
checkPermission();
buttonPressCounter = 0;
// Initialize TextToSpeech
textToSpeech = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if(status != TextToSpeech.ERROR) {
textToSpeech.setLanguage(Locale.CANADA);
}
textToSpeech.setOnUtteranceProgressListener(new UtteranceProgressListener() {
@Override
public void onStart(String utteranceId) {
// Speaking started.
}
@Override
public void onDone(String utteranceId) {
// Speaking stopped.
if(responsive){ // if(utteranceId == "LISTEN_FOR_NEW_RESPONSE")
// possible method one:
SimulateButton.performClick();
// possible method two:
//mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
// NOTE: SO FAR NEITHER OF THESE LINES MAKE THE SPEECHRECOGNIZER START UP WITHOUT ME TOUCHING THE BUTTON!!!
}
}
@Override
public void onError(String utteranceId) {
// There was an error.
}
});
}
});
// Initialize UI View Components
StatusDisplay = (TextView)findViewById(R.id.StatusUpdater);
WordingDisplay = (TextView)findViewById(R.id.WordsHeard);
// Create Reference to Classes
speechToTextAnalyzer = new SpeechToTextAnalyzer(this, this);
// Initialize SpeechRecognizer and Intent
final SpeechRecognizer mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
final Intent mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE,
Locale.getDefault());
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_MAX_RESULTS, 1);
mSpeechRecognizer.setRecognitionListener(new RecognitionListener() {
@Override
public void onReadyForSpeech(Bundle bundle) {
}
@Override
public void onBeginningOfSpeech() {
}
@Override
public void onRmsChanged(float v) {
}
@Override
public void onBufferReceived(byte[] bytes) {
}
@Override
public void onEndOfSpeech() {
}
@Override
public void onError(int errorCode) {
String errorMessage = getErrorText(errorCode);
//Log.d(LOG_TAG, "FAILED " + errorMessage);
//listeningOn = false; // UNSURE!!
}
// accompanying text for onError
public String getErrorText(int errorCode) {
String message;
switch (errorCode) {
case SpeechRecognizer.ERROR_AUDIO:
message = "Audio Recording Error";
break;
case SpeechRecognizer.ERROR_CLIENT:
message = "Client Side Error";
break;
case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS:
message = "Insufficient Permissions";
break;
case SpeechRecognizer.ERROR_NETWORK:
message = "Network Error";
break;
case SpeechRecognizer.ERROR_NETWORK_TIMEOUT:
message = "Network TimeOut";
break;
case SpeechRecognizer.ERROR_NO_MATCH:
message = "No Match";
break;
case SpeechRecognizer.ERROR_RECOGNIZER_BUSY:
message = "RecognitionService Busy";
break;
case SpeechRecognizer.ERROR_SERVER:
message = "Error From Server";
break;
case SpeechRecognizer.ERROR_SPEECH_TIMEOUT:
message = "No Speech Input";
break;
default:
message = "Couldn't Understand, Please Try Again";
break;
}
return message;
}
@Override
public void onResults(Bundle bundle) {
//getting all the matches
ArrayList<String> matches = bundle
.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
String firstMatch = "";
//displaying the first match
if (matches != null) {
for(int i = 0; i < matches.size(); i++){
firstMatch = matches.get(i); // get the last one, which has the actual words I spoke...
}
matches.clear();
}
//StatusDisplay.setText("Not Listening...");
// ENTRY POINT FOR ALL COMMAND FUNCTIONALITY
speechToTextAnalyzerResponse = speechToTextAnalyzer.AnalyzeSpeechSingleResponse(firstMatch);
buttonPressCounter = speechToTextAnalyzer.AnalyzeSpeechCounter(buttonPressCounter);
responsive = speechToTextAnalyzer.AnalyzeSpeechResponseThenListen(buttonPressCounter);
SPEECH_RESPONSE_BACK = speechToTextAnalyzerResponse;
WordingDisplay.setText(SPEECH_RESPONSE_BACK);
if(responsive){
RESPONSIVE_SPEECH("... okay now say something new ... ");
}
else {
SINGLE_RESPONSE();
}
}
@Override
public void onPartialResults(Bundle bundle) {
}
@Override
public void onEvent(int i, Bundle bundle) {
}
}); // end of mSpeechRecognizer.setRecognitionListener
SimulateButton = (Button)findViewById(R.id.DoSomething);
SimulateButton.setOnTouchListener(new View.OnTouchListener() {
@Override
public boolean onTouch(View view, MotionEvent motionEvent) {
switch (motionEvent.getAction()) {
case MotionEvent.ACTION_UP:
//when the user removed the finger
mSpeechRecognizer.stopListening();
StatusDisplay.setText("Not Listening...");
break;
case MotionEvent.ACTION_DOWN:
//finger is on the button
StatusDisplay.setText("Listening...");
mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
// WHILE I HOLD BUTTON THIS ACTUALLY WORKS BUT NOT INSIDE ONUTTERANCEPROGRESSLISTENER.........
break;
}
return true;
}
});
} // end of onCreate()
public void SINGLE_RESPONSE(){
if (!textToSpeech.isSpeaking()) {
textToSpeech.speak(SPEECH_RESPONSE_BACK, TextToSpeech.QUEUE_FLUSH, null);
//textToSpeech.speak(WordingDisplay.getText().toString(), TextToSpeech.QUEUE_FLUSH, null);
} else {
textToSpeech.stop();
}
}
public void RESPONSIVE_SPEECH(String PleaseSayThis){
SPEECH_RESPONSE_BACK = PleaseSayThis;
// https://android-developers.googleblog.com/2009/09/introduction-to-text-to-speech-in.html
if (!textToSpeech.isSpeaking()) {
HashMap<String, String> stringStringHashMap = new HashMap<String, String>();
stringStringHashMap.put(TextToSpeech.Engine.KEY_PARAM_UTTERANCE_ID, "LISTEN_FOR_NEW_RESPONSE"); // when using normal text to speech, the ID is "ID_MAIN_CALL" to be searched for in onUtteranceCompleted
textToSpeech.speak(SPEECH_RESPONSE_BACK, TextToSpeech.QUEUE_FLUSH, stringStringHashMap);
} else {
textToSpeech.stop();
}
}
private void checkPermission() {
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.M) {
if (!(ContextCompat.checkSelfPermission(this, Manifest.permission.RECORD_AUDIO) == PackageManager.PERMISSION_GRANTED)) {
Intent intent = new Intent(Settings.ACTION_APPLICATION_DETAILS_SETTINGS,
Uri.parse("package:" + getPackageName()));
startActivity(intent);
finish();
}
}
}
protected void onDestroy(){
if(textToSpeech != null) {
textToSpeech.stop();
textToSpeech.shutdown();
textToSpeech = null;
}
super.onDestroy();
}
According to the UtteranceProgressListener docs, "The callbacks specified in this method can be called from multiple threads."
To verify this, you can add
boolean wasCalledFromBackgroundThread = (Thread.currentThread().getId() != 1);
Log.i("XXX", "was onDone() called on a background thread? : " + wasCalledFromBackgroundThread);
to the onDone() method body.
In my experience, they are called on background threads more often than not.
So, inside your onDone() method body, I would suggest surrounding the UI manipulation with runOnUiThread() like so:
@Override
public void onDone(String utteranceId) {
runOnUiThread(new Runnable() {
@Override
public void run() {
// possible method one:
SimulateButton.performClick();
// possible method two:
//mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
});
}
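If you would rather not reference the Activity directly, an equivalent approach (a sketch of my own, not from the original answer) is to post the work to a Handler bound to the main looper:
@Override
public void onDone(String utteranceId) {
    // UtteranceProgressListener callbacks may arrive on a background thread,
    // so hop back to the main thread before touching views or the recognizer.
    new Handler(Looper.getMainLooper()).post(new Runnable() {
        @Override
        public void run() {
            SimulateButton.performClick();
        }
    });
}
This needs android.os.Handler and android.os.Looper imported, and behaves the same as runOnUiThread() here.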

Showing a Toast in Text To Speech if not Supported

When adding a Toast to the TTS button in a Fragment, I get the error "Cannot resolve method 'getApplicationContext()'". I tried getActivity(). and getContext()., but then more errors showed up. This is the Toast:
Toast.makeText(getApplicationContext(), "Not Supported", Toast.LENGTH_SHORT).show();
Here is the Fragment code:
TextToSpeech toSpeech;
int result;
EditText editText;
String text;
@Nullable
@Override
public View onCreateView(LayoutInflater inflater, @Nullable ViewGroup container, @Nullable Bundle savedInstanceState) {
View v = inflater.inflate(R.layout.fragment_home, container, false);
editText = v.findViewById(R.id.editText);
toSpeech = new TextToSpeech(HomeFragment.this, new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
Locale locale = new Locale("tr-TR");
int result = toSpeech.setLanguage(locale);
} else {
Toast.makeText(getApplicationContext(), "Not Supported", Toast.LENGTH_SHORT).show();
}
}
});
return v;
}
public void TTS(View view) {
switch (view.getId()) {
case R.id.bplay:
if (result == TextToSpeech.LANG_MISSING_DATA || result == TextToSpeech.LANG_NOT_SUPPORTED) {
} else {
text = editText.getText().toString();
toSpeech.speak(text, TextToSpeech.QUEUE_FLUSH, null);
toSpeech.setSpeechRate((float) 0.8);
toSpeech.setPitch((float) 0.7);
}
break;
}
}
@Override
public void onDestroy() {
super.onDestroy();
if (toSpeech != null) {
toSpeech.stop();
toSpeech.shutdown();
}
}
}
The problem is this call:
new TextToSpeech(HomeFragment.this, ...)
You should use getActivity(), which returns the FragmentActivity this fragment is currently associated with:
Toast.makeText(getActivity(), "Not Supported", Toast.LENGTH_SHORT).show();
new TextToSpeech(getActivity(), ...)
Finally:
toSpeech = new TextToSpeech(getActivity(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
Locale locale = new Locale("tr-TR");
int result = toSpeech.setLanguage(locale);
} else {
Toast.makeText(getActivity(), "Not Supported", Toast.LENGTH_SHORT).show();
}
}
});
First of all, in your code you have passed HomeFragment.this, which is not a proper Context in a Fragment:
toSpeech = new TextToSpeech(HomeFragment.this, new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
Locale locale = new Locale("tr-TR");
int result = toSpeech.setLanguage(locale);
} else {
Toast.makeText(getApplicationContext(), "Not Supported", Toast.LENGTH_SHORT).show();
}
}
});
You need to replace HomeFragment.this with getActivity() and also replace getApplicationContext() with getActivity() in your code, like below:
toSpeech = new TextToSpeech(getActivity(), new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
Locale locale = new Locale("tr-TR");
int result = toSpeech.setLanguage(locale);
} else {
Toast.makeText(getActivity(), "Not Supported", Toast.LENGTH_SHORT).show();
}
}
});
Second Option
If the Toast still does not work properly using getActivity(), you can also try
getActivity().getBaseContext();
like below:
Toast.makeText(getActivity().getBaseContext(), "Not Supported", Toast.LENGTH_SHORT).show();
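As a side note (not from the original answers): if the fragment comes from the AndroidX library, requireActivity() and requireContext() do the same job as getActivity()/getContext() but fail fast with a clear exception when the fragment is detached, for example:
toSpeech = new TextToSpeech(requireContext(), new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        if (status != TextToSpeech.SUCCESS) {
            // Report the failure using the fragment's own context.
            Toast.makeText(requireContext(), "Not Supported", Toast.LENGTH_SHORT).show();
        }
    }
});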

synthesizeToFile failed in Android

TextToSpeech is initialized like this in onCreate()
tts = new TextToSpeech(this, this);
onInit is coded like this:
@Override
public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
tts.setOnUtteranceProgressListener(new UtteranceProgressListener() {
@Override
public void onStart(String s) {
Log.v(TAG, "onStart : " + s);
}
@Override
public void onDone(String s) {
tts.setLanguage(Locale.US);
fabSpeak.setEnabled(true);
Log.v(TAG, "Proceed");
}
@Override
public void onError(String s) {
Log.v(TAG, "onError : " + s);
}
});
Log.v(TAG, "Proceed2");
} else {
Log.e(TAG, "Initilization Failed!");
}
}
Only "Proceed2" is printed. "Proceed" is printed only when I call
tts.speak(text, TextToSpeech.QUEUE_FLUSH, null, "1");
If I call synthesizeToFile without calling speak, like this:
int test = tts.synthesizeToFile(text, null, file, "tts");
if (test == TextToSpeech.SUCCESS) {
Log.v(TAG, "Success");
}
This log prints "Success", but the file is empty. If I synthesize the file after calling speak, then the file has data.
But I want to call synthesizeToFile without calling speak first. I don't know what is wrong here.

How can I get Text To Speech to read a foreign language?

When I check which languages are available, Thai (th) is available, but it doesn't read the text.
@SuppressLint("NewApi")
private void speak() {
if(tts!=null && tts.isSpeaking()){
tts.stop();
}else{
tts = new TextToSpeech(this, this);
tts.setLanguage(Locale.forLanguageTag("th")); //tts.getAvailableLanguages().;
tts.setSpeechRate(0.7f);
}
}
@Override
public void onInit(int status) {
tts.speak("ซึ่งมีระยะทางส่วนใหญ่เป็น ทางหลวงแผ่นดินหมายเลข (สายบางนา - หาดเล็ก) เป็นเส้นทางคมนาคมหลักเส้นหนึ่งของประเทศไทย ", TextToSpeech.QUEUE_FLUSH, null);
}
Edit your code like this:
@SuppressLint("NewApi")
private void speak() {
if(tts!=null && tts.isSpeaking()) {
tts.stop();
}else{
tts = new TextToSpeech(this, this);
}
}
@Override public void onInit(int status) {
if (status == TextToSpeech.SUCCESS) {
// setLanguage() takes a Locale, not the String "th_TH"
int res = tts.setLanguage(new Locale("th", "TH"));
//tts.getAvailableLanguages().;
tts.setSpeechRate(0.7f);
if (res >= TextToSpeech.LANG_AVAILABLE) {
tts.speak("ซึ่งมีระยะทางส่วนใหญ่เป็น ทางหลวงแผ่นดินหมายเลข (สายบางนา - หาดเล็ก) เป็นเส้นทางคมนาคมหลักเส้นหนึ่งของประเทศไทย ", TextToSpeech.QUEUE_FLUSH, null);
}
}
}
Because the TextToSpeech instance is created asynchronously, you will only hear the synthesis result when you use tts after the onInit() callback has completed.
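If Thai is still silent after that, it is worth verifying that the engine actually has the Thai voice data installed. A small sketch of my own (not part of the answer above), using the standard isLanguageAvailable() check:
Locale thai = new Locale("th", "TH");
int availability = tts.isLanguageAvailable(thai);
if (availability == TextToSpeech.LANG_MISSING_DATA
        || availability == TextToSpeech.LANG_NOT_SUPPORTED) {
    // Voice data for Thai is missing; ask the TTS engine to install it.
    Intent installIntent = new Intent(TextToSpeech.Engine.ACTION_INSTALL_TTS_DATA);
    startActivity(installIntent);
} else {
    tts.setLanguage(thai);
}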

Android TextToSpeech in Chrome ARC

I'm trying to port my Android app to Chrome using ARC Welder. The TextToSpeech component doesn't seem to work. In one activity I have an indeterminate progress circle that waits until the TTS engine is initialized. On Chrome, it either spins forever or throws a NullPointerException. Is TTS not available in Chrome? I'm running Chrome OS on a Chromebox.
UtteranceProgressListener ttsListener = new UtteranceProgressListener() {
@Override
public void onStart(String s) {
Logg.d("speech started: " + s);
if (loadingDialog.isShowing()) {
loadingDialog.dismiss();
}
}
@Override
public void onDone(String s) {
Logg.d("speech done: " + s);
if (s.equals("1")) {
nextWord();
} else if (s.equals("2")) {
CheckLeave();
}
}
@Override
public void onError(String s) {
Logg.e("Text to Speech error speaking: " + s);
}
};
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
showProgressDialog();
}
@Override
protected void onResume() {
if (tts==null) {
Logg.d("re-initializing TTS");
tts=new TextToSpeech(getApplicationContext(),
new TextToSpeech.OnInitListener() {
@Override
public void onInit(int status) {
if(status != TextToSpeech.ERROR){
tts.setSpeechRate(.5f + .25f * (Integer)KVDB.GetValue("speechRate",2));
tts.setLanguage(Locale.US);
if (pauseTime != 0) {
//Paused. Say nothing.
} else if (currentWord == null) {
startTime = new ExcelDate();
nextWord();
} else if (currentWord.length() == 0) {
nextWord();
} else {
reSpeak();
}
}
}
});
tts.setOnUtteranceProgressListener(ttsListener);
}
super.onResume();
}
private void showProgressDialog() {
loadingDialog = new ProgressDialog(this);
loadingDialog.setProgressStyle(ProgressDialog.STYLE_SPINNER);
loadingDialog.setTitle(getString(R.string.test_loading_msg));
loadingDialog.show();
}
As you found, it does look like we don't have any kind of default TTS service provider as part of the ARC package. There is not one provided by the base Android OS at all.
Please feel free to file a bug for it.
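Until that changes, a defensive check like the sketch below (my own, not from the answer) at least lets the app detect that no engine is present instead of spinning forever:
tts = new TextToSpeech(getApplicationContext(), new TextToSpeech.OnInitListener() {
    @Override
    public void onInit(int status) {
        if (status != TextToSpeech.SUCCESS || tts.getEngines().isEmpty()) {
            // No usable TTS engine (as appears to be the case under ARC):
            // stop the spinner and report the problem instead of waiting.
            loadingDialog.dismiss();
            Logg.e("No TTS engine available on this device");
        }
    }
});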
