Running Android Speech Recognition as a Service: will not start

I'm using the solution here: Android Speech Recognition as a service on Android 4.1 & 4.2. The code below gets to the onStartCommand() method, but the speech recognition never seems to kick off, as evidenced by the fact that the onReadyForSpeech() method is never called.
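If the recognizer never reaches onReadyForSpeech(), it is worth checking first that a recognition service is actually installed on the device and that the RECORD_AUDIO permission is declared in the manifest. A minimal guard (not part of the original code) at the top of the service's onCreate() could look like this:
if (!SpeechRecognizer.isRecognitionAvailable(this))
{
// no recognition service (e.g. the Google app) is installed, so there is nothing to listen with
Log.e("VoiceRecogService", "Speech recognition is not available on this device");
stopSelf();
return;
}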
UPDATE:
So I made an addition that allowed onReadyForSpeech() to be called, BUT onError() is then called with error code 6 after the onReadyForSpeech() method completes (this goes into a continuous loop because the start-listening code is kicked off again after onError()). As Hoan Nguyen states below, error code 6 is ERROR_SPEECH_TIMEOUT, BUT I never see a speech recognition dialog come up to tell me to start speaking. I also tried to start speaking immediately after kicking off this code, without waiting for that dialog, and I still get error code 6. What am I doing wrong here?
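For reference, error code 6 is SpeechRecognizer.ERROR_SPEECH_TIMEOUT. A small helper like the following (a hypothetical addition, not from the original post) makes the onError() log easier to read:
private static String errorName(int error)
{
switch (error)
{
case SpeechRecognizer.ERROR_NETWORK_TIMEOUT: return "ERROR_NETWORK_TIMEOUT";
case SpeechRecognizer.ERROR_NETWORK: return "ERROR_NETWORK";
case SpeechRecognizer.ERROR_AUDIO: return "ERROR_AUDIO";
case SpeechRecognizer.ERROR_SERVER: return "ERROR_SERVER";
case SpeechRecognizer.ERROR_CLIENT: return "ERROR_CLIENT";
case SpeechRecognizer.ERROR_SPEECH_TIMEOUT: return "ERROR_SPEECH_TIMEOUT";
case SpeechRecognizer.ERROR_NO_MATCH: return "ERROR_NO_MATCH";
case SpeechRecognizer.ERROR_RECOGNIZER_BUSY: return "ERROR_RECOGNIZER_BUSY";
case SpeechRecognizer.ERROR_INSUFFICIENT_PERMISSIONS: return "ERROR_INSUFFICIENT_PERMISSIONS";
default: return "UNKNOWN(" + error + ")";
}
}
// e.g. in onError(): Log.d("TESTING: SPEECH SERVICE", "error = " + errorName(error));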
UPDATE (9-06-13):
As Hoan stated, there is no dialog that comes up; you just start talking as soon as listening starts. But I ran into another issue when trying to restart the listener after every onResults() call. That issue, and the complete code for this, can be found here: Android Speech Speech Recognition: Repeated Calling of SpeechRecognizer.startListening() fails on JB 4.1.2
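The pattern that eventually worked for restarting is to reset mIsListening and re-send the start message from onResults(), rather than calling startListening() again directly. Roughly (a sketch reusing the mServerMessenger and MSG_RECOGNIZER_START_LISTENING members from the service below):
@Override
public void onResults(Bundle results)
{
ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
// ... use matches ...
mIsListening = false;
try
{
mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
}
catch (RemoteException e)
{
}
}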
My activity code that calls the service:
startService(new Intent(VoiceRecognition.this, VoiceRecogService.class));
My service:
public class VoiceRecogService extends Service
{
protected AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
private int mBindFlag;
private Messenger mServiceMessenger;
@Override
public void onCreate()
{
super.onCreate();
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
//mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
protected static class IncomingHandler extends Handler
{
private WeakReference<VoiceRecogService> mtarget;
IncomingHandler(VoiceRecogService target)
{
mtarget = new WeakReference<VoiceRecogService>(target);
}
@Override
public void handleMessage(Message msg)
{
final VoiceRecogService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
//Log.d(TAG, "message start listening"); //$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
//Log.d(TAG, "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
{
@Override
public void onTick(long millisUntilFinished)
{
// TODO Auto-generated method stub
}
@Override
public void onFinish()
{
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public int onStartCommand (Intent intent, int flags, int startId)
{
//mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
try
{
Message msg = new Message();
msg.what = MSG_RECOGNIZER_START_LISTENING;
mServerMessenger.send(msg);
}
catch (RemoteException e)
{
}
return START_NOT_STICKY;
}
@Override
public void onDestroy()
{
super.onDestroy();
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
}
}
protected class SpeechRecognitionListener implements RecognitionListener
{
@Override
public void onBeginningOfSpeech()
{
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
}
@Override
public void onBufferReceived(byte[] buffer)
{
String sTest = "";
}
@Override
public void onEndOfSpeech()
{
Log.d("TESTING: SPEECH SERVICE", "onEndOfSpeech"); //$NON-NLS-1$
}
@Override
public void onError(int error)
{
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
try
{
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
//Log.d(TAG, "error = " + error); //$NON-NLS-1$
}
@Override
public void onEvent(int eventType, Bundle params)
{
}
@Override
public void onPartialResults(Bundle partialResults)
{
}
@Override
public void onReadyForSpeech(Bundle params)
{
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
}
Log.d("TESTING: SPEECH SERVICE", "onReadyForSpeech"); //$NON-NLS-1$
}
@Override
public void onResults(Bundle results)
{
//Log.d(TAG, "onResults"); //$NON-NLS-1$
}
@Override
public void onRmsChanged(float rmsdB)
{
}
}
@Override
public IBinder onBind(Intent arg0) {
// TODO Auto-generated method stub
return null;
}
}


Android service not working with bindService()

I want to do speech-to-text continuously in the background, so I am using the solution given in the link below, but I am not sure how to implement it.
link
I have done this so far, but my service is still not running and I am not getting any output.
So I think I have done something wrong in the implementation. Please help me correct my code so that the service runs and I get my output.
This is my MainActivity.java:
public class MainActivity extends AppCompatActivity {
private static final boolean DEBUG = true; // DEBUG is referenced in mServiceConnection below but was never declared
private int mBindFlag;
private Messenger mServiceMessenger;
private Button btStartService;
private TextView tvText;
@Override
protected void onStart() {
super.onStart();
Log.d("check", "onStart: ");
bindService(new Intent(getApplicationContext(), MyService.class), mServiceConnection, mBindFlag);
}
@Override
protected void onStop() {
super.onStop();
if (mServiceMessenger != null) {
unbindService(mServiceConnection);
mServiceMessenger = null;
}
}
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
Intent service = new Intent(this, MyService.class);
this.startService(service);
mBindFlag = Context.BIND_ABOVE_CLIENT;
Log.d("check", "onCreate: "+mBindFlag);
btStartService = (Button) findViewById(R.id.btStartService);
tvText = (TextView) findViewById(R.id.tvText);
int PERMISSION_ALL = 1;
String[] PERMISSIONS = {
Manifest.permission.RECORD_AUDIO
};
if (!hasPermissions(this, PERMISSIONS)) {
ActivityCompat.requestPermissions(this, PERMISSIONS, PERMISSION_ALL);
}
}
public static boolean hasPermissions(Context context, String... permissions) {
if (context != null && permissions != null) {
for (String permission : permissions) {
if (ActivityCompat.checkSelfPermission(context, permission) != PackageManager.PERMISSION_GRANTED) {
return false;
}
}
}
return true;
}
private final ServiceConnection mServiceConnection = new ServiceConnection()
{
@Override
public void onServiceConnected(ComponentName name, IBinder service)
{
if (DEBUG) {
Log.d("check", "onServiceConnected");} //$NON-NLS-1$
mServiceMessenger = new Messenger(service);
Message msg = new Message();
msg.what = MyService.MSG_RECOGNIZER_START_LISTENING;
try
{
mServiceMessenger.send(msg);
}
catch (RemoteException e)
{
e.printStackTrace();
}
}
@Override
public void onServiceDisconnected(ComponentName name)
{
if (DEBUG) {Log.d("check", "onServiceDisconnected");} //$NON-NLS-1$
mServiceMessenger = null;
}
};
}
This is the code for my background service
public class MyService extends Service
{
protected AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
@Override
public void onCreate()
{
super.onCreate();
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
Log.d("check", "onCreate: "+"service");
}
protected static class IncomingHandler extends Handler
{
private WeakReference<MyService> mtarget;
IncomingHandler(MyService target)
{
mtarget = new WeakReference<MyService>(target);
}
@Override
public void handleMessage(Message msg)
{
final MyService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
Log.d("check", "message start listening"); //$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
Log.d("check", "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
{
@Override
public void onTick(long millisUntilFinished)
{
// TODO Auto-generated method stub
}
@Override
public void onFinish()
{
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public void onDestroy()
{
super.onDestroy();
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
}
}
protected class SpeechRecognitionListener implements RecognitionListener
{
private static final String TAG = "check";
@Override
public void onBeginningOfSpeech()
{
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
}
@Override
public void onBufferReceived(byte[] buffer)
{
}
@Override
public void onEndOfSpeech()
{
//Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
}
@Override
public void onError(int error)
{
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
try
{
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
//Log.d(TAG, "error = " + error); //$NON-NLS-1$
}
@Override
public void onEvent(int eventType, Bundle params)
{
}
@Override
public void onPartialResults(Bundle partialResults)
{
}
@Override
public void onReadyForSpeech(Bundle params)
{
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
}
Log.d("check", "onReadyForSpeech");
}
@Override
public void onResults(Bundle results)
{
Toast.makeText(getApplicationContext(),results.toString(),Toast.LENGTH_LONG).show();
}
@Override
public void onRmsChanged(float rmsdB)
{
}
}
@Override
public IBinder onBind(Intent arg0) {
return mServerMessenger.getBinder();
}
}
I got my answer: I had not declared the service in the manifest.
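For anyone hitting the same thing: the service just needs a declaration inside the <application> element of AndroidManifest.xml, for example <service android:name=".MyService" /> (the exact android:name depends on your package and class name).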

Loop SpeechRecognizer in Android [duplicate]

I have managed to get continuous speech recognition working (using the SpeechRecognizer class) as a service on all Android versions up to 4.1. My question concerns getting it working on versions 4.1 and 4.2, where there is a known problem: contrary to the documentation, a few seconds after voice recognition is started, if no voice input has been detected, the speech recognizer dies silently. (http://code.google.com/p/android/issues/detail?id=37883)
I have found a question which proposes a workaround for this problem (Voice Recognition stops listening after a few seconds), but I am unsure how to implement the Handler required for that solution. I am aware of the 'beep' that will happen every few seconds with this workaround, but getting continuous voice recognition is more important to me.
If anyone has any other alternative workarounds then I'd like to hear those too.
This is a workaround for Android version 4.1.1.
public class MyService extends Service
{
protected AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
private boolean mIsStreamSolo;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
@Override
public void onCreate()
{
super.onCreate();
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
}
protected static class IncomingHandler extends Handler
{
private WeakReference<MyService> mtarget;
IncomingHandler(MyService target)
{
mtarget = new WeakReference<MyService>(target);
}
@Override
public void handleMessage(Message msg)
{
final MyService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
if (!target.mIsStreamSolo)
{
target.mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, true);
target.mIsStreamSolo = true;
}
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
//Log.d(TAG, "message start listening"); //$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
if (target.mIsStreamSolo)
{
target.mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, false);
target.mIsStreamSolo = false;
}
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
//Log.d(TAG, "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
{
@Override
public void onTick(long millisUntilFinished)
{
// TODO Auto-generated method stub
}
@Override
public void onFinish()
{
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public void onDestroy()
{
super.onDestroy();
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
}
}
protected class SpeechRecognitionListener implements RecognitionListener
{
@Override
public void onBeginningOfSpeech()
{
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
}
@Override
public void onBufferReceived(byte[] buffer)
{
}
@Override
public void onEndOfSpeech()
{
//Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
}
@Override
public void onError(int error)
{
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
try
{
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
//Log.d(TAG, "error = " + error); //$NON-NLS-1$
}
@Override
public void onEvent(int eventType, Bundle params)
{
}
@Override
public void onPartialResults(Bundle partialResults)
{
}
@Override
public void onReadyForSpeech(Bundle params)
{
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
}
Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
}
@Override
public void onResults(Bundle results)
{
//Log.d(TAG, "onResults"); //$NON-NLS-1$
}
@Override
public void onRmsChanged(float rmsdB)
{
}
}
}
02/16/2013 - Fix for the beep sound: if you use Text To Speech in your app, make sure to turn off the solo stream in onResults().
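In other words, un-solo the stream as soon as results arrive, along these lines (a sketch using the same fields as the service above):
@Override
public void onResults(Bundle results)
{
if (mIsStreamSolo)
{
// give the audio streams back before any Text To Speech output
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, false);
mIsStreamSolo = false;
}
// ... handle the results ...
}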
If you really want to implement continuous listening without an internet connection, you need to consider third-party packages; one of them is CMUSphinx. Check the Pocketsphinx Android demo for an example of how to listen for a keyword efficiently offline and react to specific commands, like the key phrase "oh mighty computer". The code to do that is simple:
You create a recognizer and just add a keyword-spotting search:
recognizer = defaultSetup()
.setAcousticModel(new File(modelsDir, "hmm/en-us-semi"))
.setDictionary(new File(modelsDir, "lm/cmu07a.dic"))
.setKeywordThreshold(1e-5f)
.getRecognizer();
recognizer.addListener(this);
recognizer.addKeywordSearch(KWS_SEARCH_NAME, KEYPHRASE);
switchSearch(KWS_SEARCH_NAME);
and define a listener:
@Override
public void onPartialResult(Hypothesis hypothesis) {
    if (hypothesis == null)
        return;
    String text = hypothesis.getHypstr();
    if (text.equals(KEYPHRASE)) {
        // do something
    }
}
For any of you who are trying to silence the beep sound, regarding the @HoanNguyen answer, which is very good: be careful, because as stated in the API, setStreamSolo is cumulative. So if there is an error in the speech recognition and onError() is called (for example, no internet connection), then setStreamSolo(true) is called again and again, which will result in your app silencing the whole phone (very bad)! The solution is to add setStreamSolo(false) to the SpeechRecognizer's onError().
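A sketch of that onError() safeguard, assuming the mIsStreamSolo flag from the answer above (the same idea applies to the setStreamMute-based variants):
@Override
public void onError(int error)
{
if (mIsStreamSolo)
{
// undo the solo first so repeated errors do not keep the phone silenced
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, false);
mIsStreamSolo = false;
}
// ... existing restart logic ...
}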
Check out my demo app:
https://github.com/galrom/ContinuesVoiceRecognition
I recommend using both PocketSphinx and SpeechRecognizer.

Android speech recognizer works fine on 5.0.1 but doesn't work on 5.1

I am working on creating a speech recognition service on Android and followed previous work from others here. The old version was based on Android 5.0.1 and it worked perfectly fine.
However, when I try to move it to Android 5.1, it always stops in the onError function of the recognition listener with error code 7, which means ERROR_NO_MATCH.
I don't know why this happens. Is there any difference in using the speech recognizer between Android 5.0.1 and 5.1?
If someone can help me, I would really appreciate it. Thanks.
Here is my code:
public class speechrecognitionService extends Service
{
protected static AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
private static boolean mIsStreamSolo;
private static String voiceCommand;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
public static final String TAG = null;
public static final String FEEDBACK = "com.kut.kutcamera";
public static final String COMMAND = "command";
public static final String standardVoiceCommand = "take a photo";
public static final String voiceCommandAlt1 = "take photo";
public static final String voiceCommandAlt2 = "photo";
public static final String voiceCommandAlt3 = "take";
@Override
public void onCreate()
{
super.onCreate();
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
Toast.makeText(this, "Service is created", Toast.LENGTH_LONG).show();
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
// TODO Auto-generated method stub
Toast.makeText(this, "Say your command loud and clear please.", Toast.LENGTH_LONG).show();
mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
//startListenting(true);
//int result = super.onStartCommand(intent,flags,startId);
return super.onStartCommand(intent, flags, startId);
}
protected static class IncomingHandler extends Handler
{
private WeakReference<speechrecognitionService> mtarget;
IncomingHandler(speechrecognitionService target)
{
mtarget = new WeakReference<speechrecognitionService>(target);
}
@Override
public void handleMessage(Message msg)
{
final speechrecognitionService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
if (!mIsStreamSolo)
{
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, true);
mIsStreamSolo = true;
}
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
//Log.d(TAG, "message start listening"); //$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
if (mIsStreamSolo)
{
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, false);
mIsStreamSolo = false;
}
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
//Log.d(TAG, "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(30000, 1000)
{
@Override
public void onTick(long millisUntilFinished)
{
// TODO Auto-generated method stub
}
@Override
public void onFinish()
{
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public void onDestroy()
{
super.onDestroy();
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
//Toast.makeText(speechrecognitionService.this, "the recognizer is destroyed", Toast.LENGTH_LONG).show();
}
}
protected class SpeechRecognitionListener implements RecognitionListener
{
@Override
public void onBeginningOfSpeech()
{
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
}
@Override
public void onBufferReceived(byte[] buffer)
{
}
@Override
public void onEndOfSpeech()
{
//Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
}
@Override
public void onError(int error)
{
Log.d(TAG, "onError: " + error);
if ((error == SpeechRecognizer.ERROR_NO_MATCH)
|| (error == SpeechRecognizer.ERROR_SPEECH_TIMEOUT)){
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Log.d("TESTING: SPEECH SERVICE: CALL START", "onError()");
startListening(true);
}
}
@Override
public void onEvent(int eventType, Bundle params)
{
}
@Override
public void onPartialResults(Bundle partialResults)
{
}
@Override
public void onReadyForSpeech(Bundle params)
{
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
}
//Toast.makeText(speechrecognitionService.this, "Please say your command loudly and clearly", Toast.LENGTH_LONG).show();
}
@Override
public void onResults(Bundle results)
{
//Log.d(TAG, "onResults"); //$NON-NLS-1$
ArrayList<String> strlist = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
voiceCommand = strlist.get(0);
Toast.makeText(speechrecognitionService.this, voiceCommand, Toast.LENGTH_LONG).show();
if (voiceCommand.equals(standardVoiceCommand) || voiceCommand.equals(voiceCommandAlt1)
|| voiceCommand.equals(voiceCommandAlt2)||voiceCommand.equals(voiceCommandAlt3))
publishResults();
else {
Toast.makeText(speechrecognitionService.this, "Say the command again please.", Toast.LENGTH_LONG).show();
mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
}
}
@Override
public void onRmsChanged(float rmsdB)
{
}
private void startListening(boolean bMuteSound){
Log.d("TESTING: SPEECH SERVICE: startListening()", mIsListening? "true":"false");
if (bMuteSound && Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
}
if (!mIsListening)
{
mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
//recognizeSpeechDirectly ();
mIsListening = true;
}
}
}
@Override
public IBinder onBind(Intent intent) {
// TODO Auto-generated method stub
return null;
}
private void publishResults() {
Intent intent = new Intent(FEEDBACK);
intent.putExtra(COMMAND, voiceCommand);
//intent.putExtra(RESULT, result);
sendBroadcast(intent);
}
}

Android v <= ICS continuous speech recognition ERROR_NETWORK_TIMEOUT

I created a continuous speech recognition Android app by following this thread. It works perfectly on my Moto G (4.4.2), but for some reason it crashes on 2.3.5 and 2.3.6; I already tried removing the timer workaround for 4.1 and newer versions and it's the same.
The thing is that it works fine when I say words, but when it catches some noise (like a mouse click) it takes about 10 seconds recognizing (or whatever it does after onEndOfSpeech()) and then it throws ERROR_NETWORK_TIMEOUT.
My problem might be related to this one, but in my case the error always happens.
I would really appreciate some help here.
Update:
Here is a minimal example based on the first link I mention; it still fails for me. Additionally, I had to add the service to the manifest (<service android:name="com.test.MyService"></service>), the two text views in activity_main.xml, and the RECORD_AUDIO permission.
MainActivity:
public class MainActivity extends Activity {
protected static final boolean DEBUG = false;
protected static final String TAG = null;
private int mBindFlag;
private MainActivity activityContext;
private Intent speechService;
public static TextView methodText;
public static TextView resultsText;
private static Messenger mServiceMessenger;
@Override
protected void onCreate(Bundle savedInstanceState) {
super.onCreate(savedInstanceState);
setContentView(R.layout.activity_main);
methodText = (TextView) findViewById(R.id.textView1);
resultsText = (TextView) findViewById(R.id.textView2);
activityContext = this;
speechService = new Intent(activityContext, MyService.class);
activityContext.startService(speechService);
mBindFlag = Build.VERSION.SDK_INT < Build.VERSION_CODES.ICE_CREAM_SANDWICH ? 0 : Context.BIND_ABOVE_CLIENT;
}
@Override
public boolean onCreateOptionsMenu(Menu menu) {
// Inflate the menu; this adds items to the action bar if it is present.
getMenuInflater().inflate(R.menu.main, menu);
return true;
}
@Override
protected void onStart() {
super.onStart();
bindService(new Intent(this, MyService.class), mServiceConnection, mBindFlag);
}
@Override
protected void onStop()
{
super.onStop();
if (mServiceMessenger != null)
{
unbindService(mServiceConnection);
mServiceMessenger = null;
}
}
private final ServiceConnection mServiceConnection = new ServiceConnection()
{
@Override
public void onServiceConnected(ComponentName name, IBinder service)
{
if (DEBUG) {Log.d(TAG, "onServiceConnected");} //$NON-NLS-1$
mServiceMessenger = new Messenger(service);
sendMessage(MyService.MSG_RECOGNIZER_START_LISTENING);
}
@Override
public void onServiceDisconnected(ComponentName name)
{
if (DEBUG) {Log.d(TAG, "onServiceDisconnected");} //$NON-NLS-1$
mServiceMessenger = null;
}
}; // mServiceConnection
public static void sendMessage(int type){
Message msg = new Message();
msg.what = type;
try
{
mServiceMessenger.send(msg);
}
catch (RemoteException e)
{
e.printStackTrace();
}
}
@Override
protected void onDestroy(){
super.onDestroy();
activityContext.stopService(speechService);
}
}
Service class:
public class MyService extends Service{
protected static AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
private static boolean mIsStreamSolo;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
private static final String TAG = null;
@Override
public void onCreate()
{
super.onCreate();
MainActivity.methodText.setText("onCreate");
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
}
protected static class IncomingHandler extends Handler
{
private WeakReference<MyService> mtarget;
IncomingHandler(MyService target)
{
mtarget = new WeakReference<MyService>(target);
}
@Override
public void handleMessage(Message msg)
{
MainActivity.methodText.setText("handleMessage");
final MyService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
if (!mIsStreamSolo)
{
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, true);
mIsStreamSolo = true;
}
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
//Log.d(TAG, "message start listening"); //$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
if (mIsStreamSolo)
{
mAudioManager.setStreamSolo(AudioManager.STREAM_VOICE_CALL, false);
mIsStreamSolo = false;
}
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
//Log.d(TAG, "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
{
@Override
public void onTick(long millisUntilFinished)
{
MainActivity.methodText.setText("onTick");
// TODO Auto-generated method stub
}
@Override
public void onFinish()
{
MainActivity.methodText.setText("onFinish");
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public void onDestroy()
{
super.onDestroy();
MainActivity.methodText.setText("onDestroy");
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
}
}
protected class SpeechRecognitionListener implements RecognitionListener
{
private static final String TAG = "SpeechApp";
@Override
public void onBeginningOfSpeech()
{
MainActivity.methodText.setText("onBeginningOfSpeech");
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
}
@Override
public void onBufferReceived(byte[] buffer)
{
MainActivity.methodText.setText("onBufferReceived");
}
@Override
public void onEndOfSpeech()
{
//Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
MainActivity.methodText.setText("onEndOfSpeech");
}
@Override
public void onError(int error)
{
MainActivity.methodText.setText("onError");
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
try
{
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
//Log.d(TAG, "error = " + error); //$NON-NLS-1$
}
@Override
public void onEvent(int eventType, Bundle params)
{
MainActivity.methodText.setText("onEvent");
}
@Override
public void onPartialResults(Bundle partialResults)
{
MainActivity.methodText.setText("onPartialResults");
}
@Override
public void onReadyForSpeech(Bundle params)
{
MainActivity.methodText.setText("onReadyForSpeech");
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
}
Log.d(TAG, "onReadyForSpeech"); //$NON-NLS-1$
}
@Override
public void onResults(Bundle results)
{
MainActivity.methodText.setText("onResults");
System.out.println(results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION));
MainActivity.resultsText.setText(results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION).toString());
mIsListening = false;
MainActivity.sendMessage(MyService.MSG_RECOGNIZER_START_LISTENING);
}
@Override
public void onRmsChanged(float rmsdB)
{
MainActivity.methodText.setText("onRmsChanged");
}
}
@Override
public IBinder onBind(Intent intent) {
MainActivity.methodText.setText("onBind");
// TODO Auto-generated method stub
Log.d(TAG, "onBind"); //$NON-NLS-1$
return mServerMessenger.getBinder();
}
}
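One possible mitigation (not from the linked thread): when onError() reports ERROR_NETWORK_TIMEOUT or ERROR_NETWORK, delay the restart instead of re-sending MSG_RECOGNIZER_START_LISTENING immediately, so the recognizer is not restarted while the network is still timing out. A sketch using the messenger fields from MyService above:
// inside onError(), after mIsListening has been reset
if (error == SpeechRecognizer.ERROR_NETWORK_TIMEOUT || error == SpeechRecognizer.ERROR_NETWORK)
{
new Handler(Looper.getMainLooper()).postDelayed(new Runnable()
{
@Override
public void run()
{
try
{
mServerMessenger.send(Message.obtain(null, MSG_RECOGNIZER_START_LISTENING));
}
catch (RemoteException e)
{
}
}
}, 3000); // back off for a few seconds before retrying
return;
}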

Getting speech to text Android

I have been referring to the Android Speech Recognition as a service on Android 4.1 & 4.2 post to try to implement speech recognition in a service.
I guess I have got it right. On running it on my device I get the "Ready for Speech" toast message which I have declared inside the onReadyForSpeech() function.
According to Hoan Nguyen, the person who gave the answer for the above post, we can start speaking as soon as onReadyForSpeech() is called.
My problem is that I don't know how to get the speech we are speaking, convert it to text, and where to do that.
Does anyone know how to do it? I know it's a very basic question to ask, but it's my first time working with speech recognition, so please bear with me.
Any help on this is very much appreciated. Thanks in advance :)
public class MyService extends Service
{
protected AudioManager mAudioManager;
protected SpeechRecognizer mSpeechRecognizer;
protected Intent mSpeechRecognizerIntent;
protected final Messenger mServerMessenger = new Messenger(new IncomingHandler(this));
protected boolean mIsListening;
protected volatile boolean mIsCountDownOn;
static final int MSG_RECOGNIZER_START_LISTENING = 1;
static final int MSG_RECOGNIZER_CANCEL = 2;
@Override
public void onCreate()
{
super.onCreate();
mAudioManager = (AudioManager) getSystemService(Context.AUDIO_SERVICE);
mSpeechRecognizer = SpeechRecognizer.createSpeechRecognizer(this);
mSpeechRecognizer.setRecognitionListener(new SpeechRecognitionListener());
mSpeechRecognizerIntent = new Intent(RecognizerIntent.ACTION_RECOGNIZE_SPEECH);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_LANGUAGE_MODEL,
RecognizerIntent.LANGUAGE_MODEL_FREE_FORM);
mSpeechRecognizerIntent.putExtra(RecognizerIntent.EXTRA_CALLING_PACKAGE,
this.getPackageName());
mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
//Toast.makeText(this, "onCreate", Toast.LENGTH_SHORT).show();
Log.d("onCreate","Entered");
}
protected static class IncomingHandler extends Handler
{
private WeakReference<MyService> mtarget;
IncomingHandler(MyService target)
{
mtarget = new WeakReference<MyService>(target);
Log.d("IncomingHandler","Entered");
}
@Override
public void handleMessage(Message msg)
{
Log.d("handleMessage","Entered");
final MyService target = mtarget.get();
switch (msg.what)
{
case MSG_RECOGNIZER_START_LISTENING:
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
// turn off beep sound
target.mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, true);
}
if (!target.mIsListening)
{
target.mSpeechRecognizer.startListening(target.mSpeechRecognizerIntent);
target.mIsListening = true;
Log.d("TAG", "message start listening");
//$NON-NLS-1$
}
break;
case MSG_RECOGNIZER_CANCEL:
target.mSpeechRecognizer.cancel();
target.mIsListening = false;
Log.d("TAG", "message canceled recognizer"); //$NON-NLS-1$
break;
}
}
}
// Count down timer for Jelly Bean work around
protected CountDownTimer mNoSpeechCountDown = new CountDownTimer(5000, 5000)
{
@Override
public void onTick(long millisUntilFinished)
{
// TODO Auto-generated method stub
Log.d("onTick","Entered");
}
@Override
public void onFinish()
{
Log.d("onFinish","Entered");
mIsCountDownOn = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_CANCEL);
try
{
mServerMessenger.send(message);
message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
}
};
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
// TODO Auto-generated method stub
//mSpeechRecognizer.startListening(mSpeechRecognizerIntent);
try
{
Message msg = new Message();
msg.what = MSG_RECOGNIZER_START_LISTENING;
mServerMessenger.send(msg);
}
catch (RemoteException e)
{
Log.d("msg",""+e);
}
return START_NOT_STICKY;
//return super.onStartCommand(intent, flags, startId);
}
@Override
public void onDestroy()
{
super.onDestroy();
if (mIsCountDownOn)
{
mNoSpeechCountDown.cancel();
}
if (mSpeechRecognizer != null)
{
mSpeechRecognizer.destroy();
}
Log.d("onDestroy","Entered");
}
protected class SpeechRecognitionListener implements RecognitionListener
{
private static final String TAG = "Sppech---->";
@Override
public void onBeginningOfSpeech()
{
// speech input will be processed, so there is no need for count down anymore
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
//Log.d(TAG, "onBeginingOfSpeech"); //$NON-NLS-1$
Log.d("onBeginningOfSpeech","Entered");
}
@Override
public void onBufferReceived(byte[] buffer)
{
String sTest = "";
Log.d("onBufferReceived","Entered");
}
@Override
public void onEndOfSpeech()
{
//Log.d(TAG, "onEndOfSpeech"); //$NON-NLS-1$
Log.d("onEndOfSpeech","Entered");
}
@Override
public void onError(int error)
{
if (mIsCountDownOn)
{
mIsCountDownOn = false;
mNoSpeechCountDown.cancel();
}
mIsListening = false;
Message message = Message.obtain(null, MSG_RECOGNIZER_START_LISTENING);
try
{
mServerMessenger.send(message);
}
catch (RemoteException e)
{
}
//Log.d(TAG, "error = " + error); //$NON-NLS-1$
Log.d("onError","Entered");
}
@Override
public void onEvent(int eventType, Bundle params)
{
}
@Override
public void onPartialResults(Bundle partialResults)
{
}
@Override
public void onReadyForSpeech(Bundle params)
{
if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN)
{
mIsCountDownOn = true;
mNoSpeechCountDown.start();
mAudioManager.setStreamMute(AudioManager.STREAM_SYSTEM, false);
}
//Log.d("TAG", "onReadyForSpeech");
Toast.makeText(getApplicationContext(), "Ready for Speech", Toast.LENGTH_SHORT).show();
Log.d("onReadyForSpeech","Entered");//$NON-NLS-1$
}
@Override
public void onResults(Bundle results)
{
//Log.d(TAG, "onResults"); //$NON-NLS-1$
}
@Override
public void onRmsChanged(float rmsdB)
{
}
}
@Override
public IBinder onBind(Intent intent) {
// TODO Auto-generated method stub
return null;
}
}
You get it in onResults(Bundle results), from which you can retrieve what the user said as an ArrayList:
ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
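A slightly fuller sketch of that callback, if you just want to log or display the top match (displaying it in a TextView or broadcasting it is up to your app):
@Override
public void onResults(Bundle results)
{
ArrayList<String> matches = results.getStringArrayList(SpeechRecognizer.RESULTS_RECOGNITION);
if (matches != null && !matches.isEmpty())
{
String text = matches.get(0); // best hypothesis first
Log.d("Sppech---->", "heard: " + text);
}
// restart listening here (e.g. send MSG_RECOGNIZER_START_LISTENING) if you want continuous recognition
}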
