@class _EARSpeechRecognitionAudioBuffer, NSDictionary, EARVoiceCommandActiveSet, _EARSpeakerCodeInfo, _EARRecognitionMetrics, NSObject, NSString, _EARFormatter, NSSet, _EARSpeechModelInfo, NSArray, NSData, NSNumber, _EARTokenizer;
@protocol OS_dispatch_queue;

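// Private Objective-C front end to the C++ "quasar" speech-recognition engine.
// The ivars below hold shared_ptr handles into quasar::SpeechRecognizer plus
// speaker-code training and voice-command compilation state; these roles are
// inferred from the dumped names, not from documentation.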
@interface _EARSpeechRecognizer : NSObject {
    NSObject<OS_dispatch_queue> *_formatterQueue;
    _EARFormatter *_formatter;
    NSObject<OS_dispatch_queue> *_trainingQueue;
    struct shared_ptr<quasar::SpeakerCodeTraining> { struct SpeakerCodeTraining *__ptr_; struct __shared_weak_count *__cntrl_; } _training;
    struct shared_ptr<const quasar::VoiceCommandActiveSetCompilation> { struct VoiceCommandActiveSetCompilation *__ptr_; struct __shared_weak_count *__cntrl_; } _voiceCommandCompilation;
    NSSet *_endsOfSentencePunctuations;
    struct shared_ptr<quasar::SpeechRecognizer> { struct SpeechRecognizer *__ptr_; struct __shared_weak_count *__cntrl_; } _recognizer;
    _EARSpeechRecognitionAudioBuffer *_currentAudioBuffer;
    struct weak_ptr<ResultStreamWrapper> { struct ResultStreamWrapper *__ptr_; struct __shared_weak_count *__cntrl_; } _currentResultStreamWrapper;
    NSString *_currentLanguage;
    NSString *_currentTask;
    unsigned long long _currentSamplingRate;
    NSObject<OS_dispatch_queue> *_recognitionQueue;
    NSDictionary *_muxIdMask;
    NSDictionary *_muxIdReverseMask;
    NSSet *_muxIds;
    NSArray *_userProfiles;
    struct vector<std::string, std::allocator<std::string>> { void *__begin_; void *__end_; struct __compressed_pair<std::string *, std::allocator<std::string>> { void *__value_; } __end_cap_; } _rightContextTokens;
    struct shared_ptr<EARModelInitializeContext> { struct EARModelInitializeContext *__ptr_; struct __shared_weak_count *__cntrl_; } _modelInitializeContext;
    NSArray *_onScreenContextForEditLme;
    BOOL _loadLmeForVoiceCommand;
    _EARTokenizer *_tokenizer;
}

@property (readonly, nonatomic) BOOL isContinuousListening;
@property (readonly, nonatomic) NSString *configPath;
@property (readonly, nonatomic) unsigned short itnEnablingFlags;
@property (retain, nonatomic) NSNumber *overrideDoServerSideEndpointing;
@property (copy, nonatomic) NSData *userProfileData;
@property (copy, nonatomic) NSData *jitProfileData;
@property (readonly, nonatomic) _EARSpeechModelInfo *modelInfo;
@property (readonly, nonatomic) _EARSpeakerCodeInfo *speakerCodeInfo;
@property (nonatomic) BOOL detectUtterances;
@property (nonatomic) BOOL concatenateUtterances;
@property (nonatomic) BOOL allowUtteranceDelay;
@property (nonatomic) BOOL formatAcrossUtterances;
@property (nonatomic) double endpointStart;
@property (nonatomic) BOOL recognizeEagerCandidates;
@property (nonatomic) BOOL farField;
@property (nonatomic) BOOL highPriority;
@property (nonatomic) BOOL enableSpeakerCodeTraining;
@property (nonatomic) double maximumRecognitionDuration;
@property (copy, nonatomic) NSDictionary *recognitionReplacements;
@property (copy, nonatomic) NSDictionary *recognitionConfidenceSubtraction;
@property (copy, nonatomic) NSArray *leftContext;
@property (copy, nonatomic) NSString *inputOrigin;
@property (copy, nonatomic) NSString *deviceId;
@property (copy, nonatomic) NSString *refTranscriptForErrorBlaming;
@property (copy, nonatomic) NSString *bluetoothDeviceId;
@property (copy, nonatomic) NSString *userId;
@property (copy, nonatomic) NSString *sessionId;
@property (copy, nonatomic) NSArray *extraLmList;
@property (copy, nonatomic) NSArray *scoreNbestExtraLmList;
@property (nonatomic) BOOL scoreNbest;
@property (nonatomic) double latitude;
@property (nonatomic) double longitude;
@property (nonatomic) BOOL disableAutoPunctuation;
@property (nonatomic) BOOL disablePartialResults;
@property (nonatomic) BOOL enableVoiceCommands;
@property (nonatomic) BOOL shouldGenerateVoiceCommandCandidates;
@property (readonly, nonatomic) EARVoiceCommandActiveSet *voiceCommandActiveSet;
@property (nonatomic) BOOL recognizeEmoji;
@property (copy, nonatomic) NSString *rightContext;
@property (copy, nonatomic) NSString *selectedText;
@property (copy, nonatomic) NSString *aneContext;
@property (copy, nonatomic) NSString *cpuContext;
@property (copy, nonatomic) NSString *gpuContext;
@property (copy, nonatomic) _EARRecognitionMetrics *recognitionMetrics;
@property (copy, nonatomic) NSArray *leftContextForItn;

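// Class methods: ahead-of-time model compilation/purging for a configuration,
// supported configuration-version bounds, and raw-token extraction from
// recognition results (behavior inferred from the dumped names).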
+ (void)initialize;
+ (void)compileRecognizerModelsWithConfiguration:(id)a0;
+ (id)maximumSupportedConfigurationVersion;
+ (id)minimumSupportedConfigurationVersion;
+ (void)purgeCompiledRecognizerModelsWithConfiguration:(id)a0;
+ (id)rawTokenResultsFromRecognitionResults:(id)a0;

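// Instance methods. The -initWithConfiguration:... family below is a telescoping
// set of convenience initializers; the longest variant (ending in enableItn:)
// covers every parameter and is presumably the designated initializer, though
// the dump does not mark one.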
- (void)setActiveConfiguration:(id)a0;
- (void)pauseRecognition;
- (id)activeConfiguration;
- (id).cxx_construct;
- (void)setUserProfile:(id)a0;
- (id)initWithConfiguration:(id)a0;
- (void).cxx_destruct;
- (id)_tokenizer;
- (id)initWithConfiguration:(id)a0 withGeneralVoc:(id)a1 withLexiconEnh:(id)a2 withItnEnh:(id)a3;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 modelLoadingOptions:(id)a5 enableSpeakerCodeTraining:(BOOL)a6;
- (BOOL)isSpeakerCodeTrainingSupported:(id)a0;
- (id)runRecognitionWithResultStream:(id)a0;
- (struct shared_ptr<quasar::RecogAudioBufferBase> { struct RecogAudioBufferBase *x0; struct __shared_weak_count *x1; })_audioBufferWithLangauge:(id)a0 task:(id)a1 samplingRate:(unsigned long long)a2 userProfileData:(id)a3 resultStream:(struct shared_ptr<quasar::RecogResultStreamBase> { struct RecogResultStreamBase *x0; struct __shared_weak_count *x1; })a4; // "Langauge" misspelling preserved as dumped from the binary
- (void)_restartActiveRecognition;
- (void)_setProfileContainers:(id)a0 muxIds:(id)a1;
- (id)_unmaskMuxPackages:(id)a0;
- (void)_waitForAsyncRecogToFinish;
- (void)_waitForInitialization;
- (BOOL)canCloneIsFinalAsLastNonFinal;
- (void)cancelRecognition;
- (void)dumpModelVirtualMemoryInfo;
- (int)getCachedTokensSize;
- (void)getFormatterWithBlock:(id /* block */)a0;
- (struct shared_ptr<quasar::SpeechRecognizer> { struct SpeechRecognizer *x0; struct __shared_weak_count *x1; })getRecognizer;
- (id)initWithConfiguration:(id)a0 overrideConfigFiles:(id)a1;
- (id)initWithConfiguration:(id)a0 overrideConfigFiles:(id)a1 generalVoc:(id)a2 lexiconEnh:(id)a3 itnEnh:(id)a4;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 generalVoc:(id)a2 lexiconEnh:(id)a3 itnEnh:(id)a4;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7 enableSpeakerCodeTraining:(BOOL)a8;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7 modelLoadingOptions:(id)a8 enableSpeakerCodeTraining:(BOOL)a9;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7 modelLoadingOptions:(id)a8 enableSpeakerCodeTraining:(BOOL)a9 supportEmojiRecognition:(BOOL)a10;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7 modelLoadingOptions:(id)a8 enableSpeakerCodeTraining:(BOOL)a9 supportEmojiRecognition:(BOOL)a10 voiceCommandActiveSet:(id)a11;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 generalVoc:(id)a3 lexiconEnh:(id)a4 itnEnh:(id)a5 language:(id)a6 activeConfiguration:(id)a7 modelLoadingOptions:(id)a8 enableSpeakerCodeTraining:(BOOL)a9 supportEmojiRecognition:(BOOL)a10 voiceCommandActiveSet:(id)a11 modelContextDelegate:(id)a12;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 enableSpeakerCodeTraining:(BOOL)a5;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 modelLoadingOptions:(id)a5 enableSpeakerCodeTraining:(BOOL)a6 supportEmojiRecognition:(BOOL)a7;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 modelLoadingOptions:(id)a5 enableSpeakerCodeTraining:(BOOL)a6 supportEmojiRecognition:(BOOL)a7 voiceCommandActiveSet:(id)a8;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 modelLoadingOptions:(id)a5 enableSpeakerCodeTraining:(BOOL)a6 supportEmojiRecognition:(BOOL)a7 voiceCommandActiveSet:(id)a8 modelContextDelegate:(id)a9;
- (id)initWithConfiguration:(id)a0 overrides:(id)a1 overrideConfigFiles:(id)a2 language:(id)a3 activeConfiguration:(id)a4 modelLoadingOptions:(id)a5 enableSpeakerCodeTraining:(BOOL)a6 supportEmojiRecognition:(BOOL)a7 voiceCommandActiveSet:(id)a8 modelContextDelegate:(id)a9 enableItn:(BOOL)a10;
- (id)initWithConfiguration:(id)a0 useQuasarFormatter:(BOOL)a1;
- (id)initWithConfiguration:(id)a0 useQuasarFormatter:(BOOL)a1 activeConfiguration:(id)a2;
- (id)initWithConfiguration:(id)a0 withLanguage:(id)a1 withSdapiConfig:(id)a2;
- (void)interruptTraining;
- (id)recognitionResultsWithAudioData:(id)a0 userProfileData:(id)a1 language:(id)a2 task:(id)a3 samplingRate:(unsigned long long)a4;
- (id)recognitionResultsWithAudioData:(id)a0 userProfileData:(id)a1 language:(id)a2 task:(id)a3 samplingRate:(unsigned long long)a4 extraLanguageModel:(id)a5;
- (id)recognitionStatistics;
- (id)recognitionUtteranceInfos;
- (id)recognitionUtterenceStatistics; // "Utterence" misspelling preserved as dumped from the binary
- (void)requestEagerResult:(id)a0;
- (void)resumeRecognitionWithLeftContext:(id)a0 rightContext:(id)a1 selectedText:(id)a2;
- (id)runRecognitionWithResultStream:(id)a0 language:(id)a1 task:(id)a2 samplingRate:(unsigned long long)a3;
- (id)runRecognitionWithResultStream:(id)a0 language:(id)a1 task:(id)a2 samplingRate:(unsigned long long)a3 userProfileData:(id)a4 speakerCodeWriter:(id)a5;
- (id)runRecognitionWithResultStream:(id)a0 speakerCodeWriter:(id)a1 language:(id)a2 task:(id)a3 samplingRate:(unsigned long long)a4;
- (void)setAlternateRawRecognitionTokenSausage:(id)a0;
- (void)setLeftContextText:(id)a0;
- (struct vector<std::string, std::allocator<std::string>> { void *x0; void *x1; struct __compressed_pair<std::string *, std::allocator<std::string>> { void *x0; } x2; })splitWithTokenizer:(id)a0 isLeftContext:(BOOL)a1 shouldTruncate:(BOOL)a2 outTokensInVocab:(id *)a3;
- (struct vector<std::string, std::allocator<std::string>> { void *x0; void *x1; struct __compressed_pair<std::string *, std::allocator<std::string>> { void *x0; } x2; })splitWithTokenizer:(id)a0 outTokensInVocab:(id *)a1 isLeftContext:(BOOL)a2;
- (id)testFormattingWithOneBestResults:(id)a0 uttMillis:(id)a1;
- (id)tokenizeTextFromEnd:(id)a0 withLimit:(unsigned long long)a1 outTokensInVocab:(id *)a2;
- (void)updateJitProfileData:(id)a0;
- (void)updateUserProfileData:(id)a0;
- (void)writeRecordedStateAccesses;

@end
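
/*
 Usage sketch (editorial, not part of the dump): the selectors below are all
 declared above, but every argument is an untyped `id` in the dump, so the
 configuration object, the language and task strings, and the audio payload
 shown here are assumptions rather than documented values.

     id config = ...;  // hypothetical: the concrete configuration class is not named in this header
     _EARSpeechRecognizer *recognizer =
         [[_EARSpeechRecognizer alloc] initWithConfiguration:config];

     NSData *audio = ...;  // hypothetical PCM payload
     id results =
         [recognizer recognitionResultsWithAudioData:audio
                                     userProfileData:nil
                                            language:@"en-US"    // assumed locale-style identifier
                                                task:@"dictation" // assumed task identifier
                                        samplingRate:16000];
 */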