SpeechRecognitionEngine stops recognizing when computer is locked

I am trying to create a speech recognition program that needs to run on a locked Windows computer, as part of a home automation project. However, it seems that the SpeechRecognitionEngine stops recognizing when the computer is locked (and resumes when it is unlocked).

My current test program looks like this:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Speech.Recognition;
using System.Globalization;

namespace WindowsFormsApplication1
{
    public partial class Form1 : Form
    {
        SpeechRecognitionEngine sre;

        public Form1()
        {
            InitializeComponent();
            CultureInfo ci = new CultureInfo("en-us");
            sre = new SpeechRecognitionEngine(ci);
            sre.SetInputToDefaultAudioDevice();
            GrammarBuilder gb = new GrammarBuilder("Hello");
            sre.LoadGrammarAsync(new Grammar(gb));
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }

        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            listBox1.Items.Add(DateTime.Now.ToString() + " " + e.Result.Text);
        }
    }
}
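
To confirm that the drop-out really coincides with locking and unlocking (the recognizer itself raises no error), one can log session-switch events next to the recognition results. This is a minimal sketch, not part of the original test program, assuming it is added to the Form1 constructor together with a using Microsoft.Win32; directive:

SystemEvents.SessionSwitch += (s, e) =>
{
    if (e.Reason == SessionSwitchReason.SessionLock ||
        e.Reason == SessionSwitchReason.SessionUnlock)
    {
        // SystemEvents can fire on a non-UI thread; marshal to the UI thread.
        BeginInvoke((Action)(() =>
            listBox1.Items.Add(DateTime.Now + " " + e.Reason)));
    }
};

With this in place, the list box shows a SessionLock entry after which no further recognition results appear until the matching SessionUnlock.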

I was wondering whether I could change the input of the SpeechRecognitionEngine to a live audio stream of the microphone input (perhaps using the SetInputToAudioStream or SetInputToWaveStream methods), which might work around the problem, since the microphone does not appear to be switched off when the computer is locked (I tried this with Sound Recorder).

Unfortunately, I could not find a way to get a live stream of the microphone input.

I found a workaround using NAudio (http://naudio.codeplex.com/) and the SpeechStreamer class from this Stack Overflow answer ().

The updated test program, which keeps on recognizing while the computer is locked, looks like this:

using System;
using System.Collections.Generic;
using System.ComponentModel;
using System.Data;
using System.Drawing;
using System.Linq;
using System.Text;
using System.Windows.Forms;
using Microsoft.Speech.Recognition;
using System.Globalization;
using NAudio.Wave;
using System.IO;
using System.IO.Pipes;

namespace WindowsFormsApplication1
{
    public partial class Form1 : Form
    {
        SpeechRecognitionEngine sre;
        WaveIn wi;
        SpeechStreamer ss;

        public Form1()
        {
            InitializeComponent();

            // Use a function callback instead of window messages, so recording
            // does not depend on a window message pump.
            WaveCallbackInfo callbackInfo = WaveCallbackInfo.FunctionCallback();
            wi = new WaveIn(callbackInfo);
            // Buffer size in bytes for the circular speech stream.
            ss = new SpeechStreamer(100000);
            wi.DataAvailable += wi_DataAvailable;
            wi.StartRecording();

            CultureInfo ci = new CultureInfo("en-us");
            sre = new SpeechRecognitionEngine(ci);
            // The default format for WaveIn is 8000 samples/sec, 16 bit, 1 channel
            Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo safi =
                new Microsoft.Speech.AudioFormat.SpeechAudioFormatInfo(
                    8000,
                    Microsoft.Speech.AudioFormat.AudioBitsPerSample.Sixteen,
                    Microsoft.Speech.AudioFormat.AudioChannel.Mono);
            sre.SetInputToAudioStream(ss, safi);
            GrammarBuilder gb = new GrammarBuilder("Hello");
            sre.LoadGrammarAsync(new Grammar(gb));
            sre.SpeechRecognized += sre_SpeechRecognized;
            sre.RecognizeAsync(RecognizeMode.Multiple);
        }

        void wi_DataAvailable(object sender, WaveInEventArgs e)
        {
            // Forward each captured buffer into the stream the recognizer reads from.
            ss.Write(e.Buffer, 0, e.BytesRecorded);
        }

        void sre_SpeechRecognized(object sender, SpeechRecognizedEventArgs e)
        {
            listBox1.Items.Add(DateTime.Now.ToString() + " " + e.Result.Text);
        }
    }
}
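
Note that SpeechStreamer is not part of NAudio; it is the class from the linked answer. For readers without access to it, here is a minimal stand-in, assuming a simple blocking FIFO is sufficient (the class from the answer is more elaborate, for example it can feed silence when the buffer runs dry):

using System;
using System.Collections.Generic;
using System.IO;
using System.Threading;

// Simplified, hypothetical stand-in for the SpeechStreamer class from the
// linked answer: a thread-safe FIFO stream whose Read blocks until audio
// data arrives, so the recognizer never sees a premature end-of-stream.
class SpeechStreamer : Stream
{
    private readonly object _lock = new object();
    private readonly Queue<byte> _buffer;
    private readonly int _capacity;

    public SpeechStreamer(int capacity)
    {
        _capacity = capacity;
        _buffer = new Queue<byte>(capacity);
    }

    public override bool CanRead { get { return true; } }
    public override bool CanWrite { get { return true; } }
    public override bool CanSeek { get { return false; } }
    public override long Length { get { return -1; } }
    public override long Position { get { return 0; } set { } }
    public override void Flush() { }
    public override long Seek(long offset, SeekOrigin origin) { return 0; }
    public override void SetLength(long value) { }

    public override int Read(byte[] buffer, int offset, int count)
    {
        lock (_lock)
        {
            // Block until at least one byte of audio is available.
            while (_buffer.Count == 0)
                Monitor.Wait(_lock);

            int read = Math.Min(count, _buffer.Count);
            for (int i = 0; i < read; i++)
                buffer[offset + i] = _buffer.Dequeue();
            return read;
        }
    }

    public override void Write(byte[] buffer, int offset, int count)
    {
        lock (_lock)
        {
            for (int i = 0; i < count; i++)
            {
                // Drop the oldest byte when the buffer is full.
                if (_buffer.Count >= _capacity)
                    _buffer.Dequeue();
                _buffer.Enqueue(buffer[offset + i]);
            }
            Monitor.Pulse(_lock);
        }
    }
}

The essential property is that Read never returns 0 while recording is in progress: if the stream reports end-of-data, the SpeechRecognitionEngine stops listening instead of waiting for more audio.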