Add delegate

Gregory John Casamento 2020-02-03 11:02:03 -05:00
parent 0ab7edf05a
commit 8c35633a56
7 changed files with 128 additions and 24 deletions

View file

@@ -1,4 +1,5 @@
#import <Foundation/Foundation.h>
#import <AppKit/NSSpeechRecognizer.h>
/**
* GSSpeechRecognitionEngine is an abstract speech recognition engine. One concrete subclass should

View file

@@ -4,5 +4,18 @@
* Dummy implementation of a speech engine. Doesn't do anything.
*/
@implementation GSSpeechRecognitionEngine
+ (GSSpeechRecognitionEngine*)defaultSpeechRecognitionEngine
{
return [[self new] autorelease];
}
- (void) startListening
{
}
- (void) stopListening
{
}
@end
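
Aside: as the header comment above notes, this base class is only a placeholder and a concrete engine is expected to subclass it and override the listening methods. A minimal hypothetical sketch of such a subclass (MyRecognitionEngine is an invented name, not part of this commit):

    @interface MyRecognitionEngine : GSSpeechRecognitionEngine
    @end

    @implementation MyRecognitionEngine
    - (void) startListening
    {
      // Start capturing audio and feeding it to the recognizer.
    }
    - (void) stopListening
    {
      // Stop capture and release any audio resources.
    }
    @end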

View file

@@ -1,15 +1,25 @@
#import <Foundation/Foundation.h>
#import <AppKit/NSSpeechRecognizer.h>
@class GSSpeechRecognitionEngine;
/**
* GSSpeechRecognitionServer handles all of the engine-agnostic operations. Currently,
* there aren't any, but when the on-screen text interface is added it should
* go in here.
*/
@interface GSSpeechRecognitionServer : NSObject
{
GSSpeechRecognitionEngine *_engine;
id<NSSpeechRecognizerDelegate> _delegate;
}
/**
* Returns a shared instance of the speech server.
*/
+ (id)sharedServer;
- (void) setDelegate: (id<NSSpeechRecognizerDelegate>) delegate;
- (id<NSSpeechRecognizerDelegate>) delegate;
@end
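
Aside: a client would typically obtain the shared server and install a delegate before listening begins. A hypothetical usage sketch, not part of this commit ("controller" stands for any object conforming to NSSpeechRecognizerDelegate):

    // Hypothetical client code; assumes "controller" conforms to
    // NSSpeechRecognizerDelegate.
    GSSpeechRecognitionServer *server = [GSSpeechRecognitionServer sharedServer];
    [server setDelegate: controller];

Note that _delegate is stored by plain assignment rather than retained, which matches the usual Cocoa delegate convention and avoids a retain cycle between the server and its delegate.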

View file

@@ -31,8 +31,8 @@ static GSSpeechRecognitionServer *sharedInstance;
- (id)init
{
if (nil == (self = [super init])) { return nil; }
_engine = [GSSpeechRecognitionEngine defaultSpeechRecognitionEngine];
if (nil == _engine)
{
[self release];
return nil;
@@ -47,10 +47,23 @@ static GSSpeechRecognitionServer *sharedInstance;
- (void) startListening
{
// Nothing to do here yet.
}
- (void) stopListening
{
// Nothing to do here yet.
}
- (void) setDelegate: (id<NSSpeechRecognizerDelegate>)delegate
{
_delegate = delegate;
}
- (id<NSSpeechRecognizerDelegate>) delegate
{
return _delegate;
}
@end

View file

@@ -3,10 +3,6 @@
@interface GSSpeechRecognizer : NSSpeechRecognizer {
NSString *currentVoice;
}
- (id)init;
@end

View file

@@ -50,6 +50,9 @@ static int clients;
- (id)init
{
self = [super init];
if (self != nil)
{
}
return self;
}
@@ -67,13 +70,4 @@ static int clients;
[super dealloc];
}
@end

View file

@@ -1,4 +1,7 @@
#import "GSSpeechRecognitionEngine.h"
#include <sphinxbase/err.h>
#include <sphinxbase/ad.h>
#include <pocketsphinx/pocketsphinx.h>
/**
@@ -13,10 +16,12 @@
ps_decoder_t *ps;
cmd_ln_t *config;
FILE *fh;
char const *uttid;
int16 buf[512];
int rv;
int32 score;
NSThread *_listeningThread;
id<NSSpeechRecognizerDelegate> _delegate;
}
@end
@@ -28,7 +33,7 @@
- (id)init
{
if ((self = [super init]) != nil)
{
config = cmd_ln_init(NULL, ps_args(), TRUE,
"-hmm", MODELDIR "/en-us/en-us",
@@ -36,18 +41,90 @@
"-dict", MODELDIR "/en-us/cmudict-en-us.dict",
NULL);
ps = ps_init(config);
_listeningThread = nil;
}
return self;
}
- (void) _recognizedWord: (NSString *)word
{
}
/*
* Main utterance processing loop:
* for (;;) {
* start utterance and wait for speech to process
* decoding till end-of-utterance silence will be detected
* print utterance result;
* }
*/
- (void) recognize
{
ad_rec_t *ad;
int16 adbuf[2048];
uint8 utt_started, in_speech;
int32 k;
char const *hyp;
if ((ad = ad_open_dev(cmd_ln_str_r(config, "-adcdev"),
(int) cmd_ln_float32_r(config,
"-samprate"))) == NULL)
E_FATAL("Failed to open audio device\n");
if (ad_start_rec(ad) < 0)
E_FATAL("Failed to start recording\n");
if (ps_start_utt(ps) < 0)
E_FATAL("Failed to start utterance\n");
utt_started = FALSE;
E_INFO("Ready....\n");
for (;;) {
if ((k = ad_read(ad, adbuf, 2048)) < 0)
E_FATAL("Failed to read audio\n");
ps_process_raw(ps, adbuf, k, FALSE, FALSE);
in_speech = ps_get_in_speech(ps);
if (in_speech && !utt_started) {
utt_started = TRUE;
E_INFO("Listening...\n");
}
if (!in_speech && utt_started) {
/* speech -> silence transition, time to start new utterance */
ps_end_utt(ps);
hyp = ps_get_hyp(ps, NULL);
if (hyp != NULL) {
NSString *recognizedString = [NSString stringWithCString: hyp
encoding: NSUTF8StringEncoding];
[self performSelectorOnMainThread: @selector(_recognizedWord:)
withObject: recognizedString
waitUntilDone: NO];
printf("%s\n", hyp);
fflush(stdout);
}
if (ps_start_utt(ps) < 0)
E_FATAL("Failed to start utterance\n");
utt_started = FALSE;
E_INFO("Ready....\n");
}
[NSThread sleepForTimeInterval: 0.01];
}
ad_close(ad);
}
- (void) _startProcessing
{
}
- (void) startListening
{
[NSThread detachNewThreadSelector: @selector(recognize)
toTarget: self
withObject: nil];
}
- (void) stopListening
{
rv = ps_end_utt(ps);
}
@end
@@ -56,7 +133,7 @@
+ (GSSpeechRecognitionEngine*)defaultSpeechRecognitionEngine
{
return AUTORELEASE([[PocketsphinxSpeechRecognitionEngine alloc] init]);
}
@end
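
Aside: _recognizedWord: is still an empty stub in this commit, even though the recognize loop already posts each hypothesis to it on the main thread. Once wired up, it would presumably forward the recognized text to the delegate through the NSSpeechRecognizerDelegate callback. A hedged sketch of that wiring, not part of this commit (the nil sender is a placeholder, since the engine holds no NSSpeechRecognizer reference here; messaging a nil _delegate is harmless in Objective-C):

    - (void) _recognizedWord: (NSString *)word
    {
      // Sketch only: hand the recognized text to the delegate. The
      // sender argument is nil because this engine has no
      // NSSpeechRecognizer instance to pass at this point.
      [_delegate speechRecognizer: nil didRecognizeCommand: word];
    }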