import json
import os
import time
import uuid
import warnings
from enum import Enum
from typing import Optional

import azure.cognitiveservices.speech as speechsdk
import boto3
import pvcheetah
import pvleopard
import requests
import soundfile
import torch
import whisper
from google.cloud import speech
from ibm_cloud_sdk_core.authenticators import IAMAuthenticator
from ibm_watson import SpeechToTextV1

from languages import (
    LANGUAGE_TO_CODE,
    Languages,
)
from normalizer import Normalizer

warnings.filterwarnings(
    "ignore", message="FP16 is not supported on CPU; using FP32 instead"
)
warnings.filterwarnings(
    "ignore", message="Performing inference on CPU when CUDA is available"
)
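
# Pin BLAS/OpenMP and torch to a single thread, presumably so that
# `process_sec()` timings are comparable across engines and machines.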
NUM_THREADS = 1
os.environ["OMP_NUM_THREADS"] = str(NUM_THREADS)
os.environ["MKL_NUM_THREADS"] = str(NUM_THREADS)
torch.set_num_threads(NUM_THREADS)
torch.set_num_interop_threads(NUM_THREADS)


class Engines(Enum):
    AMAZON_TRANSCRIBE = "AMAZON_TRANSCRIBE"
    AZURE_SPEECH_TO_TEXT = "AZURE_SPEECH_TO_TEXT"
    GOOGLE_SPEECH_TO_TEXT = "GOOGLE_SPEECH_TO_TEXT"
    GOOGLE_SPEECH_TO_TEXT_ENHANCED = "GOOGLE_SPEECH_TO_TEXT_ENHANCED"
    IBM_WATSON_SPEECH_TO_TEXT = "IBM_WATSON_SPEECH_TO_TEXT"
    WHISPER_TINY = "WHISPER_TINY"
    WHISPER_BASE = "WHISPER_BASE"
    WHISPER_SMALL = "WHISPER_SMALL"
    WHISPER_MEDIUM = "WHISPER_MEDIUM"
    WHISPER_LARGE = "WHISPER_LARGE"
    WHISPER_LARGE_V2 = "WHISPER_LARGE_V2"
    WHISPER_LARGE_V3 = "WHISPER_LARGE_V3"
    PICOVOICE_CHEETAH = "PICOVOICE_CHEETAH"
    PICOVOICE_LEOPARD = "PICOVOICE_LEOPARD"


class Engine(object):
    def transcribe(self, path: str) -> str:
        raise NotImplementedError()

    def audio_sec(self) -> float:
        raise NotImplementedError()

    def process_sec(self) -> float:
        raise NotImplementedError()

    def delete(self) -> None:
        raise NotImplementedError()

    def __str__(self) -> str:
        raise NotImplementedError()

    @classmethod
    def create(cls, x: Engines, language: Languages, **kwargs):
        if x is Engines.AMAZON_TRANSCRIBE:
            return AmazonTranscribeEngine(language=language)
        elif x is Engines.AZURE_SPEECH_TO_TEXT:
            return AzureSpeechToTextEngine(language=language, **kwargs)
        elif x is Engines.GOOGLE_SPEECH_TO_TEXT:
            return GoogleSpeechToTextEngine(language=language)
        elif x is Engines.GOOGLE_SPEECH_TO_TEXT_ENHANCED:
            return GoogleSpeechToTextEnhancedEngine(language=language)
        elif x is Engines.WHISPER_TINY:
            return WhisperTiny(language=language)
        elif x is Engines.WHISPER_BASE:
            return WhisperBase(language=language)
        elif x is Engines.WHISPER_SMALL:
            return WhisperSmall(language=language)
        elif x is Engines.WHISPER_MEDIUM:
            return WhisperMedium(language=language)
        elif x is Engines.WHISPER_LARGE:
            return WhisperLarge(language=language)
        elif x is Engines.WHISPER_LARGE_V2:
            return WhisperLargeV2(language=language)
        elif x is Engines.WHISPER_LARGE_V3:
            return WhisperLargeV3(language=language)
        elif x is Engines.PICOVOICE_CHEETAH:
            return PicovoiceCheetahEngine(language=language, **kwargs)
        elif x is Engines.PICOVOICE_LEOPARD:
            return PicovoiceLeopardEngine(language=language, **kwargs)
        elif x is Engines.IBM_WATSON_SPEECH_TO_TEXT:
            return IBMWatsonSpeechToTextEngine(language=language, **kwargs)
        else:
            raise ValueError(f"Cannot create {cls.__name__} of type `{x}`")


class AmazonTranscribeEngine(Engine):
    def __init__(self, language: Languages):
        self._normalizer = Normalizer.create(language)
        self._language_code = LANGUAGE_TO_CODE[language]

        self._s3_client = boto3.client("s3")
        self._s3_bucket = str(uuid.uuid4())
        self._s3_client.create_bucket(
            ACL="private",
            Bucket=self._s3_bucket,
            CreateBucketConfiguration={"LocationConstraint": "us-west-2"},
        )

        self._transcribe_client = boto3.client("transcribe")

    def transcribe(self, path: str) -> str:
        # Results are cached next to the audio file so repeated runs do not
        # re-submit jobs to the cloud.
        cache_path = path.replace(".flac", ".aws")
        if os.path.exists(cache_path):
            with open(cache_path) as f:
                res = f.read()
            return self._normalizer.normalize(res)

        job_name = str(uuid.uuid4())
        s3_object = os.path.basename(path)
        self._s3_client.upload_file(path, self._s3_bucket, s3_object)

        self._transcribe_client.start_transcription_job(
            TranscriptionJobName=job_name,
            Media={
                "MediaFileUri": f"https://s3-us-west-2.amazonaws.com/{self._s3_bucket}/{s3_object}"
            },
            MediaFormat="flac",
            LanguageCode=self._language_code,
        )

        # Poll once per second until the job completes or fails.
        while True:
            status = self._transcribe_client.get_transcription_job(
                TranscriptionJobName=job_name
            )
            job_status = status["TranscriptionJob"]["TranscriptionJobStatus"]
            if job_status == "COMPLETED":
                break
            elif job_status == "FAILED":
                error = status["TranscriptionJob"].get("FailureReason", "Unknown error")
                raise RuntimeError(f"Amazon Transcribe job {job_name} failed: {error}")
            time.sleep(1)

        content = requests.get(
            status["TranscriptionJob"]["Transcript"]["TranscriptFileUri"]
        )
        res = json.loads(content.content.decode("utf8"))["results"]["transcripts"][0]["transcript"]

        with open(cache_path, "w") as f:
            f.write(res)

        res = self._normalizer.normalize(res)
        return res

    def audio_sec(self) -> float:
        return -1.0

    def process_sec(self) -> float:
        return -1.0

    def delete(self) -> None:
        # Empty the bucket before removing it; `list_objects_v2` pages at
        # 1000 keys, hence the loop.
        response = self._s3_client.list_objects_v2(Bucket=self._s3_bucket)
        while response["KeyCount"] > 0:
            self._s3_client.delete_objects(
                Bucket=self._s3_bucket,
                Delete={
                    "Objects": [{"Key": obj["Key"]} for obj in response["Contents"]]
                },
            )
            response = self._s3_client.list_objects_v2(Bucket=self._s3_bucket)
        self._s3_client.delete_bucket(Bucket=self._s3_bucket)

    def __str__(self):
        return "Amazon Transcribe"


class AzureSpeechToTextEngine(Engine):
    def __init__(
        self,
        azure_speech_key: str,
        azure_speech_location: str,
        language: Languages,
    ):
        self._normalizer = Normalizer.create(language)
        self._language_code = LANGUAGE_TO_CODE[language]
        self._azure_speech_key = azure_speech_key
        self._azure_speech_location = azure_speech_location

    def transcribe(self, path: str) -> str:
        cache_path = path.replace(".flac", ".ms")
        if os.path.exists(cache_path):
            with open(cache_path, "r") as f:
                res = f.read()
            return self._normalizer.normalize(res)

        # The Azure SDK reads WAV input, so transcode the FLAC file first.
        wav_path = path.replace(".flac", ".wav")
        soundfile.write(
            wav_path,
            soundfile.read(path, dtype="int16")[0],
            samplerate=16000,
        )

        speech_config = speechsdk.SpeechConfig(
            subscription=self._azure_speech_key,
            region=self._azure_speech_location,
            speech_recognition_language=self._language_code,
        )
        audio_config = speechsdk.audio.AudioConfig(filename=wav_path)
        speech_recognizer = speechsdk.SpeechRecognizer(
            speech_config=speech_config,
            audio_config=audio_config,
        )

        # Continuous recognition is asynchronous: accumulate recognized
        # segments in a callback and spin until the session stops.
        res = ""

        def recognized_cb(evt):
            if evt.result.reason == speechsdk.ResultReason.RecognizedSpeech:
                nonlocal res
                res += " " + evt.result.text

        done = False

        def stop_cb(_):
            nonlocal done
            done = True

        speech_recognizer.recognized.connect(recognized_cb)
        speech_recognizer.session_stopped.connect(stop_cb)
        speech_recognizer.canceled.connect(stop_cb)

        speech_recognizer.start_continuous_recognition()
        while not done:
            time.sleep(0.5)
        speech_recognizer.stop_continuous_recognition()

        os.remove(wav_path)

        with open(cache_path, "w") as f:
            f.write(res)

        res = self._normalizer.normalize(res)
        return res

    def audio_sec(self) -> float:
        return -1.0

    def process_sec(self) -> float:
        return -1.0

    def delete(self) -> None:
        pass

    def __str__(self) -> str:
        return "Microsoft Azure Speech-to-Text"


class GoogleSpeechToTextEngine(Engine):
    def __init__(
        self,
        language: Languages,
        cache_extension: str = ".ggl",
        model: Optional[str] = None,
    ):
        self._normalizer = Normalizer.create(language)
        self._language_code = LANGUAGE_TO_CODE[language]
        self._client = speech.SpeechClient()
        self._config = speech.RecognitionConfig(
            encoding=speech.RecognitionConfig.AudioEncoding.FLAC,
            sample_rate_hertz=16000,
            language_code=self._language_code,
            model=model,
        )
        self._cache_extension = cache_extension

    def transcribe(self, path: str) -> str:
        cache_path = path.replace(".flac", self._cache_extension)
        if os.path.exists(cache_path):
            with open(cache_path) as f:
                res = f.read()
            return self._normalizer.normalize(res)

        with open(path, "rb") as f:
            content = f.read()

        audio = speech.RecognitionAudio(content=content)
        response = self._client.recognize(config=self._config, audio=audio)
        res = " ".join(result.alternatives[0].transcript for result in response.results)

        with open(cache_path, "w") as f:
            f.write(res)

        res = self._normalizer.normalize(res)
        return res

    def audio_sec(self) -> float:
        return -1.0

    def process_sec(self) -> float:
        return -1.0

    def delete(self) -> None:
        pass

    def __str__(self) -> str:
        return "Google Speech-to-Text"


class GoogleSpeechToTextEnhancedEngine(GoogleSpeechToTextEngine):
    def __init__(self, language: Languages):
        if language != Languages.EN:
            raise ValueError(
                "GOOGLE_SPEECH_TO_TEXT_ENHANCED engine only supports EN language"
            )
        super().__init__(language=language, cache_extension=".ggle", model="video")

    def __str__(self) -> str:
        return "Google Speech-to-Text Enhanced"


class IBMWatsonSpeechToTextEngine(Engine):
    def __init__(
        self,
        watson_speech_to_text_api_key: str,
        watson_speech_to_text_url: str,
        language: Languages,
    ):
        if language != Languages.EN:
            raise ValueError(
                "IBM_WATSON_SPEECH_TO_TEXT engine only supports EN language"
            )
        self._normalizer = Normalizer.create(language)
        self._service = SpeechToTextV1(
            authenticator=IAMAuthenticator(watson_speech_to_text_api_key)
        )
        self._service.set_service_url(watson_speech_to_text_url)

    def transcribe(self, path: str) -> str:
        cache_path = path.replace(".flac", ".ibm")
        if os.path.exists(cache_path):
            with open(cache_path, "r") as f:
                res = f.read()
            return self._normalizer.normalize(res)

        with open(path, "rb") as f:
            response = self._service.recognize(
                audio=f,
                content_type="audio/flac",
                smart_formatting=True,
                end_of_phrase_silence_time=15,
            ).get_result()

        res = ""
        if response and ("results" in response) and response["results"]:
            res = response["results"][0]["alternatives"][0]["transcript"]

        with open(cache_path, "w") as f:
            f.write(res)

        res = self._normalizer.normalize(res)
        return res

    def audio_sec(self) -> float:
        return -1.0

    def process_sec(self) -> float:
        return -1.0

    def delete(self) -> None:
        pass

    def __str__(self) -> str:
        return "IBM Watson Speech-to-Text"


class Whisper(Engine):
    SAMPLE_RATE = 16000

    LANGUAGE_TO_WHISPER_CODE = {
        Languages.EN: "en",
        Languages.DE: "de",
        Languages.ES: "es",
        Languages.FR: "fr",
        Languages.IT: "it",
        Languages.PT_PT: "pt",
        Languages.PT_BR: "pt",
    }

    def __init__(self, cache_extension: str, model: str, language: Languages):
        self._model = whisper.load_model(model, device="cpu")
        self._cache_extension = cache_extension
        self._language_code = self.LANGUAGE_TO_WHISPER_CODE[language]
        self._normalizer = Normalizer.create(language)
        self._audio_sec = 0.0
        self._proc_sec = 0.0

    def transcribe(self, path: str) -> str:
        audio, sample_rate = soundfile.read(path, dtype="int16")
        assert sample_rate == self.SAMPLE_RATE
        self._audio_sec += audio.size / sample_rate

        cache_path = path.replace(".flac", self._cache_extension)
        if os.path.exists(cache_path):
            with open(cache_path) as f:
                res = f.read()
            return self._normalizer.normalize(res)

        # Only the actual inference time counts toward `process_sec`.
        start_sec = time.time()
        res = self._model.transcribe(path, language=self._language_code)["text"]
        self._proc_sec += time.time() - start_sec

        with open(cache_path, "w") as f:
            f.write(res)

        res = self._normalizer.normalize(res)
        return res

    def audio_sec(self) -> float:
        return self._audio_sec

    def process_sec(self) -> float:
        return self._proc_sec

    def delete(self) -> None:
        pass

    def __str__(self) -> str:
        raise NotImplementedError()
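
# Note: the local engines (Whisper, Cheetah, Leopard) accumulate audio and
# processing durations, so a real-time factor can be derived after a run,
# e.g. `rtf = engine.process_sec() / engine.audio_sec()`. The cloud engines
# return -1.0 for both, presumably because wall-clock timing there would
# mostly measure network latency.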


class WhisperTiny(Whisper):
    def __init__(self, language: Languages):
        model = "tiny.en" if language == Languages.EN else "tiny"
        super().__init__(cache_extension=".wspt", model=model, language=language)

    def __str__(self) -> str:
        return "Whisper Tiny"


class WhisperBase(Whisper):
    def __init__(self, language: Languages):
        model = "base.en" if language == Languages.EN else "base"
        super().__init__(cache_extension=".wspb", model=model, language=language)

    def __str__(self) -> str:
        return "Whisper Base"


class WhisperSmall(Whisper):
    def __init__(self, language: Languages):
        model = "small.en" if language == Languages.EN else "small"
        super().__init__(cache_extension=".wsps", model=model, language=language)

    def __str__(self) -> str:
        return "Whisper Small"


class WhisperMedium(Whisper):
    def __init__(self, language: Languages):
        model = "medium.en" if language == Languages.EN else "medium"
        super().__init__(cache_extension=".wspm", model=model, language=language)

    def __str__(self) -> str:
        return "Whisper Medium"


class WhisperLarge(Whisper):
    def __init__(self, language: Languages):
        super().__init__(cache_extension=".wspl", model="large-v1", language=language)

    def __str__(self) -> str:
        return "Whisper Large-v1"


class WhisperLargeV2(Whisper):
    def __init__(self, language: Languages):
        super().__init__(cache_extension=".wspl2", model="large-v2", language=language)

    def __str__(self) -> str:
        return "Whisper Large-v2"


class WhisperLargeV3(Whisper):
    def __init__(self, language: Languages):
        super().__init__(cache_extension=".wspl3", model="large-v3", language=language)

    def __str__(self) -> str:
        return "Whisper Large-v3"


class PicovoiceCheetahEngine(Engine):
    def __init__(
        self,
        access_key: str,
        model_path: Optional[str],
        library_path: Optional[str],
        language: Languages,
    ):
        self._cheetah = pvcheetah.create(
            access_key=access_key, model_path=model_path, library_path=library_path
        )
        self._normalizer = Normalizer.create(language)
        self._audio_sec = 0.0
        self._proc_sec = 0.0

    def transcribe(self, path: str) -> str:
        audio, sample_rate = soundfile.read(path, dtype="int16")
        assert sample_rate == self._cheetah.sample_rate
        self._audio_sec += audio.size / sample_rate

        start_sec = time.time()
        res = ""
        # Cheetah is a streaming engine: feed fixed-size frames and collect
        # partial transcripts, then flush to get the final tail.
        for i in range(audio.size // self._cheetah.frame_length):
            partial, _ = self._cheetah.process(
                audio[i * self._cheetah.frame_length:(i + 1) * self._cheetah.frame_length]
            )
            res += partial
        res += self._cheetah.flush()
        self._proc_sec += time.time() - start_sec

        return self._normalizer.normalize(res)

    def audio_sec(self) -> float:
        return self._audio_sec

    def process_sec(self) -> float:
        return self._proc_sec

    def delete(self) -> None:
        self._cheetah.delete()

    def __str__(self) -> str:
        return "Picovoice Cheetah"


class PicovoiceLeopardEngine(Engine):
    def __init__(
        self,
        access_key: str,
        model_path: Optional[str],
        library_path: Optional[str],
        language: Languages,
    ):
        self._leopard = pvleopard.create(
            access_key=access_key, model_path=model_path, library_path=library_path
        )
        self._normalizer = Normalizer.create(language)
        self._audio_sec = 0.0
        self._proc_sec = 0.0

    def transcribe(self, path: str) -> str:
        audio, sample_rate = soundfile.read(path, dtype="int16")
        assert sample_rate == self._leopard.sample_rate
        self._audio_sec += audio.size / sample_rate

        start_sec = time.time()
        res = self._leopard.process(audio)
        self._proc_sec += time.time() - start_sec

        return self._normalizer.normalize(res[0])

    def audio_sec(self) -> float:
        return self._audio_sec

    def process_sec(self) -> float:
        return self._proc_sec

    def delete(self) -> None:
        self._leopard.delete()

    def __str__(self):
        return "Picovoice Leopard"


__all__ = [
    "Engine",
    "Engines",
]
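

if __name__ == "__main__":
    # Minimal smoke test (illustrative sketch): the engine choice and audio
    # path below are placeholder assumptions.
    _engine = Engine.create(Engines.WHISPER_TINY, language=Languages.EN)
    try:
        print(f"{_engine}: {_engine.transcribe('sample.flac')}")
    finally:
        _engine.delete()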