SoSa123456 committed
Commit e3ab055
Parent: 9cd3f2d

Update app.py

Files changed (1)
  1. app.py +100 -1
app.py CHANGED
@@ -1,3 +1,102 @@
 import gradio as gr
 
-gr.load("models/m3hrdadfi/wav2vec2-large-xlsr-persian").launch()
+gr.load("models/m3hrdadfi/wav2vec2-large-xlsr-persian").launch()
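+# launch() blocks in a script, so the evaluation code below runs only after the server exits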
+
+
+import librosa
+import torch
+import torchaudio
+from transformers import Wav2Vec2ForCTC, Wav2Vec2Processor
+from datasets import load_dataset
+
+import numpy as np
+import hazm
+import re
+import string
+
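+# Text-normalization resources: hazm's normalizer plus manual character tables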
+_normalizer = hazm.Normalizer()
+chars_to_ignore = [
+    ",", "?", ".", "!", "-", ";", ":", '"', "%", "'", "�", "#", "؟", "«", "»",
+    "ء", "،", "(", ")", "؛", "ٔ", "٬", "“", "‘", "”", "–", "…", "_", "„",
+]
+
+# For Farsi, also strip Latin letters and digits
+chars_to_ignore = chars_to_ignore + list(string.ascii_lowercase + string.digits)
+
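+# Fold Arabic variants and presentation-form glyphs into standard Persian letters;
+# zero-width joiners/marks are replaced with plain spaces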
+chars_to_mapping = {
+    'ك': 'ک', 'دِ': 'د', 'بِ': 'ب', 'زِ': 'ز', 'ذِ': 'ذ', 'شِ': 'ش', 'سِ': 'س', 'ى': 'ی',
+    'ي': 'ی', 'أ': 'ا', 'ؤ': 'و', "ے": "ی", "ۀ": "ه", "ﭘ": "پ", "ﮐ": "ک", "ﯽ": "ی",
+    "ﺎ": "ا", "ﺑ": "ب", "ﺘ": "ت", "ﺧ": "خ", "ﺩ": "د", "ﺱ": "س", "ﻀ": "ض", "ﻌ": "ع",
+    "ﻟ": "ل", "ﻡ": "م", "ﻢ": "م", "ﻪ": "ه", "ﻮ": "و", "ئ": "ی", 'ﺍ': "ا", 'ة': "ه",
+    'ﯾ': "ی", 'ﯿ': "ی", 'ﺒ': "ب", 'ﺖ': "ت", 'ﺪ': "د", 'ﺮ': "ر", 'ﺴ': "س", 'ﺷ': "ش",
+    'ﺸ': "ش", 'ﻋ': "ع", 'ﻤ': "م", 'ﻥ': "ن", 'ﻧ': "ن", 'ﻭ': "و", 'ﺭ': "ر", "ﮔ": "گ",
+    "\u200c": " ", "\u200d": " ", "\u200e": " ", "\u200f": " ", "\ufeff": " ",
+}
+
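+# Apply every key of chars_to_mapping in one pass via a single regex alternation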
+def multiple_replace(text, chars_to_mapping):
+    pattern = "|".join(map(re.escape, chars_to_mapping.keys()))
+    return re.sub(pattern, lambda m: chars_to_mapping[m.group()], str(text))
+
+def remove_special_characters(text, chars_to_ignore_regex):
+    text = re.sub(chars_to_ignore_regex, '', text).lower() + " "
+    return text
+
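+# Full cleaning pipeline applied to each dataset row's "sentence" field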
+def normalizer(batch, chars_to_ignore, chars_to_mapping):
+    # Escape each character so "-" cannot form an accidental range in the class
+    chars_to_ignore_regex = f"[{''.join(map(re.escape, chars_to_ignore))}]"
+    text = batch["sentence"].lower().strip()
+
+    text = _normalizer.normalize(text)
+    text = multiple_replace(text, chars_to_mapping)
+    text = remove_special_characters(text, chars_to_ignore_regex)
+
+    batch["sentence"] = text
+    return batch
+
+
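+# Decode each clip with torchaudio and resample to the model's expected 16 kHz rate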
+def speech_file_to_array_fn(batch):
+    speech_array, sampling_rate = torchaudio.load(batch["path"])
+    speech_array = speech_array.squeeze().numpy()
+    # librosa >= 0.10 requires keyword arguments for the sample rates
+    speech_array = librosa.resample(np.asarray(speech_array), orig_sr=sampling_rate, target_sr=16_000)
+
+    batch["speech"] = speech_array
+    return batch
+
+
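+# Greedy CTC decoding: take the argmax token per frame; batch_decode collapses
+# repeats and blanks into the final transcription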
+def predict(batch):
+    features = processor(batch["speech"], sampling_rate=16_000, return_tensors="pt", padding=True)
+
+    input_values = features.input_values.to(device)
+    attention_mask = features.attention_mask.to(device)
+
+    with torch.no_grad():
+        logits = model(input_values, attention_mask=attention_mask).logits
+
+    pred_ids = torch.argmax(logits, dim=-1)
+
+    batch["predicted"] = processor.batch_decode(pred_ids)[0]
+    return batch
+
+
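+# Load the fine-tuned Persian wav2vec2 checkpoint, on GPU when available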
+device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+processor = Wav2Vec2Processor.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-persian")
+model = Wav2Vec2ForCTC.from_pretrained("m3hrdadfi/wav2vec2-large-xlsr-persian").to(device)
+
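+# Evaluate on 1% of the Persian Common Voice test split; normalize transcripts,
+# keeping only the "sentence" and "path" columns needed downstream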
+dataset = load_dataset("common_voice", "fa", split="test[:1%]")
+dataset = dataset.map(
+    normalizer,
+    fn_kwargs={"chars_to_ignore": chars_to_ignore, "chars_to_mapping": chars_to_mapping},
+    remove_columns=list(set(dataset.column_names) - set(['sentence', 'path']))
+)
+
+dataset = dataset.map(speech_file_to_array_fn)
+result = dataset.map(predict)
+
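+# Spot check: print 20 randomly sampled reference/prediction pairs (indices may repeat)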
+max_items = np.random.randint(0, len(result), 20).tolist()
+for i in max_items:
+    reference, predicted = result["sentence"][i], result["predicted"][i]
+    print("reference:", reference)
+    print("predicted:", predicted)
+    print('---')