-
Notifications
You must be signed in to change notification settings - Fork 17
Expand file tree
/
Copy pathreturnAPI.tsx
More file actions
205 lines (183 loc) · 8.67 KB
/
returnAPI.tsx
File metadata and controls
205 lines (183 loc) · 8.67 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
// import * as sdk from 'microsoft-cognitiveservices-speech-sdk'
import installCOIServiceWorker from './coi-serviceworker'
import { API, PlaybackStatus } from '../../react-redux&middleware/redux/typesImports';
import {
ApiStatus,
AzureStatus,
ControlStatus,
SRecognition,
StreamTextStatus,
ScribearServerStatus
} from '../../react-redux&middleware/redux/typesImports';
import { useEffect, useState } from 'react';
import { batch, useDispatch, useSelector } from 'react-redux';
import { AzureRecognizer } from './azure/azureRecognizer';
import { Dispatch } from 'redux';
import { Recognizer } from './recognizer';
import { RootState } from '../../store';
import { StreamTextRecognizer } from './streamtext/streamTextRecognizer';
import { TranscriptBlock } from '../../react-redux&middleware/redux/types/TranscriptTypes';
import { WebSpeechRecognizer } from './web-speech/webSpeechRecognizer';
import { WhisperRecognizer } from './whisper/whisperRecognizer';
import { PlaybackRecognizer } from './playback/playbackRecognizer';
import { ScribearRecognizer } from './scribearServer/scribearRecognizer';
import type { SelectedOption } from '../../react-redux&middleware/redux/types/modelSelection';
// import { PlaybackReducer } from '../../react-redux&middleware/redux/reducers/apiReducers';
// controls what api to send and what to do when error handling.
// NOTES: this needs to do everything I think. Handler should be returned which allows
// event call like stop and the event should be returned... (maybe the recognition? idk.)
/*
* === * === * DO NOT DELETE IN ANY CIRCUMSTANCE * === * === *
* === * TRIBUTE TO THE ORIGINAL AUTHOR OF THIS CODE: Will * === *
DO NOT DELETE IN ANY CIRCUMSTANCE
export const returnRecogAPI = (api : ApiStatus, control : ControlStatus, azure : AzureStatus) => {
// const apiStatus = useSelector((state: RootState) => {
// return state.APIStatusReducer as ApiStatus;
// })
// const control = useSelector((state: RootState) => {
// return state.ControlReducer as ControlStatus;
// });
// const azureStatus = useSelector((state: RootState) => {
// return state.AzureReducer as AzureStatus;
// })
const recognition : Promise<any> = getRecognition(api.currentApi, control, azure);
const useRecognition : Object = makeRecognition(api.currentApi);
// const recogHandler : Function = handler(api.currentApi);
return ({ useRecognition, recognition });
}
* === * === * DO NOT DELETE IN ANY CIRCUMSTANCE * === * === *
* === * TRIBUTE TO THE ORIGINAL AUTHOR OF THIS CODE: Will * === *
*/
// Language codes Whisper accepts. Hoisted to module scope so the set is
// built once instead of on every call.
const WHISPER_LANGUAGES: ReadonlySet<string> = new Set([
  'en','es','fr','de','it','pt','nl','sv','da','nb','fi','pl','cs','sk','sl','hr','sr','bg','ro',
  'hu','el','tr','ru','uk','ar','he','fa','ur','hi','bn','ta','te','ml','mr','gu','kn','pa',
  'id','ms','vi','th','zh','ja','ko'
]);

/**
 * Map a BCP-47 language tag to the two-letter language code Whisper expects.
 *
 * Accepts "en", "en-US", etc.; only the base subtag is used ("en-US" -> "en").
 * @param bcp47 BCP-47 language tag; may be empty.
 * @returns The lowercase base language code if Whisper supports it, otherwise "en".
 */
function toWhisperCode(bcp47: string): string {
  if (!bcp47) return "en";
  const base = bcp47.split('-')[0].toLowerCase();
  return WHISPER_LANGUAGES.has(base) ? base : 'en';
}
/**
 * Instantiate the Recognizer implementation matching the selected API code.
 *
 * @param currentApi One of the `API` codes selecting the backend.
 * @param control Listening state and speech-language selection (CountryCode is passed to recognizers).
 * @param azure Azure credentials / configuration, used only by the Azure recognizer.
 * @param streamTextConfig StreamText event name and start position.
 * @param scribearServerStatus ScribeAR server connection settings.
 * @param selectedModelOption Model chosen for the ScribeAR server recognizer.
 * @param playbackStatus Playback source configuration.
 * @returns A freshly constructed (not yet started) Recognizer.
 * @throws Error for AZURE_CONVERSATION (not implemented) and for unknown API codes.
 */
const createRecognizer = (currentApi: number, control: ControlStatus, azure: AzureStatus, streamTextConfig: StreamTextStatus, scribearServerStatus: ScribearServerStatus, selectedModelOption: SelectedOption, playbackStatus: PlaybackStatus): Recognizer => {
    switch (currentApi) {
        case API.SCRIBEAR_SERVER:
            return new ScribearRecognizer(scribearServerStatus, selectedModelOption, control.speechLanguage.CountryCode);
        case API.PLAYBACK:
            return new PlaybackRecognizer(playbackStatus);
        case API.WEBSPEECH:
            return new WebSpeechRecognizer(null, control.speechLanguage.CountryCode);
        case API.AZURE_TRANSLATION:
            return new AzureRecognizer(null, control.speechLanguage.CountryCode, azure);
        case API.AZURE_CONVERSATION:
            throw new Error("Not implemented");
        case API.STREAM_TEXT:
            // Placeholder - this is just WebSpeech for now
            return new StreamTextRecognizer(streamTextConfig.streamTextEvent, 'en', streamTextConfig.startPosition);
        case API.WHISPER:
            // Whisper takes a two-letter language code rather than a full BCP-47 tag.
            return new WhisperRecognizer(
                null,
                toWhisperCode(control.speechLanguage.CountryCode),
                4 // NOTE(review): hard-coded constant; meaning not visible here — confirm against WhisperRecognizer
            );
        default:
            // Typo fixed from the original message ("Unexpcted").
            throw new Error(`Unexpected API_CODE: ${currentApi}`);
    }
}
/**
 * Build a callback that folds newly finalized transcript blocks and the
 * current in-progress block into the Redux transcript state.
 *
 * The dispatch function is captured via closure because useDispatch cannot be
 * called inside an arbitrary callback,
 * see https://stackoverflow.com/questions/59456816/how-to-call-usedispatch-in-a-callback
 * @param dispatch A Redux dispatch function
 */
const updateTranscript = (dispatch: Dispatch) => (newFinalBlocks: Array<TranscriptBlock>, newInProgressBlock: TranscriptBlock): void => {
    // Batching collapses all of the dispatches below into a single re-render.
    batch(() => {
        newFinalBlocks.forEach((finalBlock) => {
            dispatch({ type: "transcript/new_final_block", payload: finalBlock });
        });
        dispatch({ type: 'transcript/update_in_progress_block', payload: newInProgressBlock });
    });
}
/**
* Syncs up the recognizer with the API selection and listening status
* - Creates new recognizer and stop old ones when API is changed
* - Start / stop recognizer as listening changes
* - Feed any phrase list updates to azure recognizer
*
* @param recog
* @param api
* @param control
* @param azure
*
* @return transcripts, resetTranscript, recogHandler
*/
export const useRecognition = (sRecog: SRecognition, api: ApiStatus, control: ControlStatus,
azure: AzureStatus, streamTextConfig: StreamTextStatus, scribearServerStatus, selectedModelOption: SelectedOption, playbackStatus: PlaybackStatus) => {
const [recognizer, setRecognizer] = useState<Recognizer>();
// TODO: Add a reset button to utitlize resetTranscript
// const [resetTranscript, setResetTranscript] = useState<() => string>(() => () => dispatch('RESET_TRANSCRIPT'));
const dispatch = useDispatch();
// Register service worker for whisper on launch
useEffect(() => {
installCOIServiceWorker();
}, [])
// Change recognizer, if api changed
useEffect(() => {
console.log("UseRecognition, switching to new recognizer: ", api.currentApi);
let newRecognizer: Recognizer | null;
try {
// Create new recognizer, and subscribe to its events
newRecognizer = createRecognizer(api.currentApi, control, azure, streamTextConfig, scribearServerStatus, selectedModelOption, playbackStatus);
newRecognizer.onTranscribed(updateTranscript(dispatch));
setRecognizer(newRecognizer)
// Start new recognizer if necessary
if (control.listening) {
console.log("UseRecognition, attempting to start recognizer after switching")
newRecognizer.start()
}
} catch (e) {
console.log("UseRecognition, failed to switch to new recognizer: ", e);
}
return () => {
// Stop current recognizer when switching to another one, if possible
newRecognizer?.stop();
}
}, [api.currentApi, azure, control, streamTextConfig, playbackStatus, scribearServerStatus, selectedModelOption]);
// Start / stop recognizer, if listening toggled
useEffect(() => {
if (!recognizer) { // whipser won't have recogHandler
return;
}
if (control.listening) {
console.log("UseRecognition, sending start signal to recognizer")
recognizer.start();
} else if (!control.listening) {
console.log("UseRecognition, sending stop signal to recognizer")
recognizer.stop();
}
}, [control.listening]);
// Update domain phrases for azure recognizer
useEffect(() => {
console.log("UseRecognition, changing azure phrases", azure.phrases);
if (api.currentApi === API.AZURE_TRANSLATION && recognizer) {
(recognizer as AzureRecognizer).addPhrases(azure.phrases);
}
}, [azure.phrases]);
// TODO: whisper's transcript is not in redux store but only in sessionStorage at the moment.
let transcript: string = useSelector((state: RootState) => {
return state.TranscriptReducer.transcripts[0].toString()
});
// if (api.currentApi === API.WHISPER) {
// // TODO: inefficient to get it from sessionStorage everytime
// // TODO: add whisper_transcript to redux store after integrating "whisper" folder (containing stream.js) into ScribeAR
// transcript = sessionStorage.getItem('whisper_transcript') || '';
// return transcript;
// }
return transcript;
}