Fix compilation errors

This commit is contained in:
Gregory John Casamento 2020-02-01 05:43:06 -05:00
parent b1f9f1fd78
commit ea2fe89e5f
8 changed files with 73 additions and 262 deletions

View file

@ -1,52 +0,0 @@
#import <Foundation/Foundation.h>
/**
 * GSSpeechEngine is an abstract speech server. One concrete subclass should
 * be implemented for each speech engine. Currently, only one may be compiled
 * in to the speech server at any given time. This limitation may be removed
 * in future if pluggable speech engines are considered beneficial.
 */
@interface GSSpeechEngine : NSObject
/**
 * Begin speaking the specified string.  aDelegate is sent -didFinishSpeaking:
 * (see the informal GSSpeechEngineDelegate protocol below) when output ends.
 */
- (void)startSpeaking: (NSString*)aString notifyWhenDone: (id)aDelegate;
/**
 * Stop speaking.  Any in-progress phrase is abandoned.
 */
- (void)stopSpeaking;
/**
 * Returns YES if the engine is currently outputting speech.
 */
- (BOOL)isSpeaking;
/**
 * Returns an array of voices supported by this speech synthesizer.
 */
- (NSArray*)voices;
/**
 * Sets the voice.  aVoice is expected to be one of the names returned by
 * -voices; engines may ignore unknown names.
 */
- (void)setVoice: (NSString*)aVoice;
/**
 * Returns the current voice.
 */
- (NSString*)voice;
/**
 * Returns the name of the default voice for this speech engine.
 */
- (NSString*)defaultVoice;
@end
/**
 * Informal protocol adopted by objects passed to
 * -startSpeaking:notifyWhenDone:.
 */
@interface NSObject (GSSpeechEngineDelegate)
/**
 * Called when the speech engine has finished speaking a phrase. Should be
 * used to notify the original caller.  didFinish is NO when the phrase was
 * cut short by -stopSpeaking.
 */
- (void)didFinishSpeaking: (BOOL)didFinish;
@end
@interface GSSpeechEngine (Default)
/**
 * Returns a new instance of the default speech engine.
 */
+ (GSSpeechEngine*)defaultSpeechEngine;
@end

View file

@ -1,15 +0,0 @@
#import "GSSpeechEngine.h"

/**
 * Placeholder speech engine.  Every request is silently ignored: it never
 * produces audio and reports a single "default" voice.
 */
@implementation GSSpeechEngine

+ (GSSpeechEngine*)defaultSpeechEngine
{
  return [[self new] autorelease];
}

- (void)startSpeaking: (NSString*)aString notifyWhenDone: (id)anObject
{
  // Deliberately empty: the dummy engine has no audio back end.
}

- (void)stopSpeaking
{
  // Nothing is ever speaking, so there is nothing to interrupt.
}

- (BOOL)isSpeaking
{
  return NO;
}

- (NSArray*)voices
{
  return [NSArray arrayWithObject: @"default"];
}

- (void)setVoice: (NSString*)aVoice
{
  // Voice selection is ignored; only "default" exists.
}

- (NSString*)voice
{
  return @"default";
}

- (NSString*)defaultVoice
{
  return @"default";
}

@end

View file

@ -0,0 +1,20 @@
#import <Foundation/Foundation.h>
/**
 * GSSpeechRecognitionEngine is an abstract speech recognition engine.  One
 * concrete subclass should be implemented for each recognition back end.
 * Currently, only one may be compiled in to the speech recognition server at
 * any given time.  This limitation may be removed in future if pluggable
 * engines are considered beneficial.
 */
@interface GSSpeechRecognitionEngine : NSObject
@end
/**
 * Informal delegate protocol for recognition callbacks.  Currently empty;
 * methods will be added here as the engine API grows.
 */
@interface NSObject (GSSpeechRecognitionEngineDelegate)
@end
@interface GSSpeechRecognitionEngine (Default)
/**
 * Returns a new instance of the default speech recognition engine.
 */
+ (GSSpeechRecognitionEngine*)defaultSpeechRecognitionEngine;
@end

View file

@ -0,0 +1,8 @@
#import "GSSpeechRecognitionEngine.h"
/**
 * Dummy implementation of a speech recognition engine. Doesn't do anything.
 */
@implementation GSSpeechRecognitionEngine
/**
 * Returns an autoreleased instance of the dummy engine.
 *
 * Renamed from +defaultSpeechEngine: the header declares
 * +defaultSpeechRecognitionEngine and GSSpeechRecognitionServer's -init
 * sends that selector, so the old name left the declared method
 * unimplemented (a "Fix compilation errors" leftover).
 */
+ (GSSpeechRecognitionEngine*)defaultSpeechRecognitionEngine { return [[self new] autorelease]; }
@end

View file

@ -1,44 +1,15 @@
#import <Foundation/Foundation.h>
@class GSSpeechRecognitionEngine;
/**
 * GSSpeechRecognitionServer handles all of the engine-agnostic operations.
 * Currently, there aren't any, but when the on-screen text interface is
 * added it should go in here.
 */
@interface GSSpeechRecognitionServer : NSObject {
  /** The back-end recognition engine, created in -init. */
  GSSpeechRecognitionEngine *engine;
}
/**
 * Returns a shared instance of the speech recognition server.
 */
+ (id)sharedServer;
@end

View file

@ -1,72 +1,47 @@
#import "GSSpeechRecognitionServer.h"
#import "GSSpeechRecognitionEngine.h"
#import "GSSpeechRecognizer.h"
#import <Foundation/Foundation.h>

/** Process-wide shared server instance, created in +initialize. */
static GSSpeechRecognitionServer *sharedInstance;

@implementation GSSpeechRecognitionServer
+ (void)initialize
{
  // Runs once, before the class receives its first message.  The diff
  // residue assigned sharedInstance twice, leaking one instance under
  // manual retain/release; a single assignment is sufficient.
  sharedInstance = [self new];
}
+ (void)start
{
  // Publish the shared instance over Distributed Objects under the
  // server's well-known name, then service requests forever.  The
  // interleaved old body registered the removed "GSSpeechServer" name
  // and configured the connection twice; only the new name is valid.
  NSConnection *connection = [NSConnection defaultConnection];
  [connection setRootObject: sharedInstance];
  if (NO == [connection registerName: @"GSSpeechRecognitionServer"])
    {
      // Another instance already owns the name; bail out quietly.
      return;
    }
  [[NSRunLoop currentRunLoop] run];
}
+ (id)sharedServer
{
  // Accessor for the singleton built in +initialize.  The diff residue
  // contained two identical returns; the second was unreachable.
  return sharedInstance;
}
- (id)init
{
  if (nil == (self = [super init])) { return nil; }
  // The interleaved old body still called the removed
  // [GSSpeechEngine defaultSpeechEngine]; only the recognition engine
  // exists in this file's imports now.
  engine = [GSSpeechRecognitionEngine defaultSpeechRecognitionEngine];
  if (nil == engine)
    {
      // No usable engine: fail construction entirely rather than vend a
      // half-initialised server.
      [self release];
      return nil;
    }
  return self;
}
- (id)newRecognizer
{
  // Factory for per-client recognizer objects.  The diff residue left the
  // removed -newSynthesizer and the whole old speaking API (stopSpeaking,
  // voices, setVoice:, ...) interleaved inside this method's braces; none
  // of those engine selectors exist on GSSpeechRecognitionEngine.
  return [[GSSpeechRecognizer new] autorelease];
}
@end

View file

@ -1,126 +1,30 @@
#import "GSSpeechRecognitionEngine.h"
#include <pocketsphinx/pocketsphinx.h>

/**
 * Implementation of a speech engine using pocketsphinx. This should be the
 * default for resource-constrained platforms.
 *
 * NOTE(review): no recognition logic is wired up yet; this is a skeleton.
 * The interleaved flite residue (flite_init, cst_wave, audio_* and the
 * speaking thread) was removed: those symbols come from the deleted
 * <flite/flite.h> include and cannot compile against pocketsphinx.
 */
@interface PocketsphinxSpeechEngine : GSSpeechRecognitionEngine
{
}
@end

@implementation PocketsphinxSpeechEngine
@end
@implementation GSSpeechRecognitionEngine (Pocketsphinx)
/**
 * Returns a new autoreleased instance of the pocketsphinx-backed engine.
 */
+ (GSSpeechRecognitionEngine*)defaultSpeechRecognitionEngine
{
  // Bug fix: the concrete class declared in this file is
  // PocketsphinxSpeechEngine; PocketsphinxSpeechRecognitionEngine is not a
  // declared class anywhere, so the previous line was itself a compile
  // error in a commit titled "Fix compilation errors".
  return [[[PocketsphinxSpeechEngine alloc] init] autorelease];
}
@end

View file

@ -6,7 +6,7 @@
int main(void)
{
  // Top-level pool for the process; never drained, by design of a
  // run-forever server.  The interleaved old lines called the misspelled
  // GSSpeechRecognitonServer and duplicated both statements.
  [NSAutoreleasePool new];
  // +start registers the DO name and runs the run loop; it only returns
  // if name registration fails.
  [GSSpeechRecognitionServer start];
  return 0;
}