Implementing Continuous Voice Recognition in Flutter Apps


In this tutorial, I will show how to implement continuous voice recognition in a Flutter app using a Flutter package called voice_recognition_flutter. I am excited to share that I developed this package myself, so let's dive into the implementation.
Install the package

Add voice_recognition_flutter to your pubspec.yaml:

dependencies:
  voice_recognition_flutter: ^1.1.0
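After editing pubspec.yaml, fetch the package:

flutter pub get

Alternatively, flutter pub add voice_recognition_flutter adds the entry and fetches it in one step.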
After installing, open android/app/src/main/AndroidManifest.xml and add the following permissions and recognition-service query:

<uses-permission android:name="android.permission.RECORD_AUDIO" />
<uses-permission android:name="android.permission.INTERNET" />

<queries>
    <intent>
        <action android:name="android.speech.RecognitionService" />
    </intent>
</queries>
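Keep in mind that on Android 6.0 and above the RECORD_AUDIO permission must also be granted at runtime; the manifest entry only declares it. Depending on how the plugin handles this, you may want to request it explicitly yourself. Here is a minimal sketch using the permission_handler package (an extra dependency and a hypothetical helper, not part of this tutorial's setup):

import 'package:permission_handler/permission_handler.dart';

/// Asks the user for microphone access and returns true once it is granted.
/// (Hypothetical helper built on permission_handler, not part of
/// voice_recognition_flutter.)
Future<bool> ensureMicrophonePermission() async {
  final status = await Permission.microphone.request();
  return status.isGranted;
}

You could call this helper before starting recognition and skip startVoice() if it returns false.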
With the permissions in place, jump to main.dart and write the logic for a simple demo view.
import 'dart:async';

import 'package:flutter/material.dart';
import 'package:voice_recognition_flutter/voice_recognition.dart';

void main() {
  runApp(const MyApp());
}

class MyApp extends StatelessWidget {
  const MyApp({super.key});

  @override
  Widget build(BuildContext context) {
    return const MaterialApp(
      title: 'Voice Recognition Demo',
      home: VoiceRecognitionDemo(),
    );
  }
}

class VoiceRecognitionDemo extends StatefulWidget {
  const VoiceRecognitionDemo({super.key});

  @override
  State<VoiceRecognitionDemo> createState() => _VoiceRecognitionDemoState();
}
class _VoiceRecognitionDemoState extends State<VoiceRecognitionDemo> {
  final VoiceRecognition _recognition = VoiceRecognition();
  String _text = '';
  bool _isListening = false;
  double _voiceLevel = 0.0;
  final List<String> _locales = [];
  String _currentLocale = "";

  @override
  void initState() {
    super.initState();
    scheduleMicrotask(() async {
      await _loadLocales();
      // Register the listeners only after a locale has been loaded,
      // so setLanguages() is never called with an empty string.
      _initRecognition();
    });
  }

  Future<void> _loadLocales() async {
    final locales = await _recognition.getAllLocal();
    setState(() {
      _locales.clear();
      // Remove duplicates to avoid a DropdownButton assertion error.
      _locales.addAll(locales.toSet().toList());
      _currentLocale = locales.first;
    });
  }

  void _initRecognition() {
    _recognition.setLanguages(_currentLocale);
    _recognition.listenResult(
      onReadyForSpeech: (onReadyForSpeech) {},
      onBeginningOfSpeech: (onBeginningOfSpeech) {},
      onRmsChanged: (rms) {
        // Scale the RMS value roughly into the 0..1 range for the progress bar.
        setState(() => _voiceLevel = rms! / 10);
      },
      onBufferReceived: (onBufferReceived) {},
      onEndOfSpeech: (onEndOfSpeech) {},
      onError: (onError) async {
        print("Error: $onError");
        await _recognition.stopVoice();
        setState(() => _isListening = false);
      },
      onResults: (onResults) async {
        setState(() => _text += "$onResults ");
        // Start listening again as soon as a final result arrives.
        // This is what makes the recognition continuous.
        await _recognition.startVoice();
      },
      onPartialResults: (onPartialResults) {},
      onEvent: (onEvent) {},
    );
  }
  @override
  Widget build(BuildContext context) {
    return Scaffold(
      appBar: AppBar(title: const Text('Voice Recognition')),
      body: Padding(
        padding: const EdgeInsets.all(16),
        child: Column(
          children: [
            // Locale picker.
            DropdownButton<String>(
              value: _currentLocale,
              items: _locales.map((locale) {
                return DropdownMenuItem(
                  value: locale,
                  child: Text(locale),
                );
              }).toList(),
              onChanged: (value) {
                setState(() {
                  _currentLocale = value!;
                  _recognition.setLanguages(_currentLocale);
                });
              },
            ),
            const SizedBox(height: 20),
            const Text("Voice Level:"),
            // Simple visualisation of the current voice level.
            LinearProgressIndicator(
              value: _voiceLevel,
              minHeight: 15,
              backgroundColor: Colors.grey[300],
              valueColor: AlwaysStoppedAnimation<Color>(
                Colors.blue.withOpacity(_isListening ? 1.0 : 0.3),
              ),
            ),
            const SizedBox(height: 20),
            // Recognised text.
            Expanded(
              child: Container(
                padding: const EdgeInsets.all(12),
                decoration: BoxDecoration(
                  border: Border.all(color: Colors.grey),
                  borderRadius: BorderRadius.circular(8),
                ),
                child: SingleChildScrollView(
                  child: Text(_text.isEmpty ? 'Say something...' : _text),
                ),
              ),
            ),
            const SizedBox(height: 20),
            Row(
              mainAxisAlignment: MainAxisAlignment.spaceEvenly,
              children: [
                ElevatedButton(
                  onPressed: _isListening
                      ? null
                      : () async {
                          setState(() => _isListening = true);
                          await _recognition.startVoice();
                        },
                  child: const Text('Start'),
                ),
                ElevatedButton(
                  onPressed: !_isListening
                      ? null
                      : () async {
                          setState(() => _isListening = false);
                          await _recognition.stopVoice();
                        },
                  child: const Text('Stop'),
                ),
                ElevatedButton(
                  onPressed: () => setState(() => _text = ''),
                  child: const Text('Clear'),
                ),
              ],
            ),
          ],
        ),
      ),
    );
  }
}
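One small thing the example above leaves out, and which I would add in a real app (my suggestion, not part of the original code): stop the recognizer when the screen is disposed so the microphone is released. For example, inside _VoiceRecognitionDemoState:

  @override
  void dispose() {
    // Release the recognizer when the widget goes away
    // (suggested cleanup, not in the original example).
    _recognition.stopVoice();
    super.dispose();
  }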
Note: calling the startVoice() function inside the onResults listener is what makes the recognition continuous; every time a final result arrives, listening starts again. I have tested this and it works fine with the en locale.
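A small refinement worth considering (my own suggestion, not something the package requires): guard the restart with the _isListening flag so that a result arriving just after the user taps Stop does not start the recognizer again. The onResults callback from the example would then look like this:

      onResults: (onResults) async {
        setState(() => _text += "$onResults ");
        // Only restart while the user still wants to listen.
        if (_isListening) {
          await _recognition.startVoice();
        }
      },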
Output:
Happy Coding :)
Written by Sabitur Rahman, Software Engineer.