georgechang8
committed on
Update README.md
Added cleansing steps
README.md
CHANGED
@@ -1,24 +1,5 @@
 ---
 dataset_info:
-- config_name: clean
-  features:
-  - name: audio
-    dtype:
-      audio:
-        sampling_rate: 16000
-  - name: text
-    dtype: string
-  - name: id
-    dtype: string
-  - name: session_id
-    dtype: string
-  splits:
-  - name: train
-    num_bytes: 3354307483.0
-    num_examples: 46583
-  download_size: 3346711427
-  dataset_size: 3354307483.0
-- config_name: default
   features:
   - name: audio
     dtype:
@@ -37,10 +18,6 @@ dataset_info:
   download_size: 3346820592
   dataset_size: 3341105554.6299996
 configs:
-- config_name: clean
-  data_files:
-  - split: train
-    path: clean/train-*
 - config_name: default
   data_files:
   - split: train
@@ -53,10 +30,33 @@ configs:
 
 This dataset is derived from espnet/yodas; more details can be found here: https://huggingface.co/datasets/espnet/yodas
 
-This is a subset of the zh000 subset of espnet/yodas dataset, which 
+This is a subset of the zh000 subset of the espnet/yodas dataset, selecting videos that exhibit Mandarin-English code-switching.
+
+Note that code-switching is only guaranteed per video rather than per utterance, so not every utterance in the dataset contains code-switching.
 
 ## Dataset Details
 
+### Dataset Usage
+The `default` config keeps the text of the selected samples unmodified:
+```python
+from datasets import load_dataset
+cs_yodas = load_dataset("georgechang8/code_switch_yodas_zh")
+```
+The `clean` config cleanses the text of the selected samples (using the processing steps described below):
+```python
+from datasets import load_dataset
+cs_yodas_clean = load_dataset("georgechang8/code_switch_yodas_zh", "clean")
+```
+```python
+{'audio': {'path': 'GaUSbuZm5Ec-00207-00083809-00084143.wav',
+  'array': array([-0.09082031,  0.01898193,  0.02850342, ...,  0.01419067,
+         0.01391602,  0.01513672]),
+  'sampling_rate': 16000},
+ 'text': '項明生,訂Agoda的項明生',
+ 'id': 'GaUSbuZm5Ec-00207-00083809-00084143',
+ 'session_id': 'GaUSbuZm5Ec'}
+```
+
 ### Dataset Description
 
 <!-- Provide a longer summary of what this dataset is. -->
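
Because code-switching is only guaranteed per video, users who need utterance-level code-switching may want to flag which rows actually mix scripts. Below is a minimal sketch of such a check, assuming the `clean` config loaded as above; the character-class heuristic (any transcript containing both CJK and Latin characters) is illustrative and not part of the dataset:

```python
import re
from datasets import load_dataset

# Illustrative heuristic: treat an utterance as code-switched when its
# transcript contains both CJK characters and Latin letters.
HAS_CJK = re.compile(r"[\u4e00-\u9fff]")
HAS_LATIN = re.compile(r"[A-Za-z]")

cs_yodas_clean = load_dataset("georgechang8/code_switch_yodas_zh", "clean")
texts = cs_yodas_clean["train"]["text"]  # column access avoids decoding audio
n_mixed = sum(bool(HAS_CJK.search(t) and HAS_LATIN.search(t)) for t in texts)
print(f"{n_mixed} of {len(texts)} utterances mix scripts")
```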
@@ -226,6 +226,100 @@ audio_dataset.push_to_hub(
     embed_external_files=True
 )
 ```
+#### Data Cleaning
+1. The video `Pew9CK74axu` is cleaned manually:
+```python
+def filter_fn(batch):
+    return [z == 'Pew9CK74axu' for z in batch['session_id']]
+
+special_care = audio_dataset.filter(filter_fn, num_proc=8, batched=True)
+with open("manual_edit.txt", "w", encoding="utf8") as f:  # dump transcripts for hand editing
+    for l in special_care['text']:
+        f.write(l + "\n")
+# manual cleaning ...
+with open("manual_edit_finish.txt", "r", encoding="utf8") as f:
+    lines = list(map(str.strip, f.readlines()))
+replace_dict = {  # utterance id -> hand-edited transcript
+    a: b
+    for a, b in zip(special_care['id'], lines)
+}
+def manual_edit(batch):
+    texts = []
+    for sid, orig in zip(batch['id'], batch['text']):
+        texts += [replace_dict.get(sid, orig)]
+    return {'text': texts}
+
+audio_dataset_manual = audio_dataset.map(manual_edit, batched=True, num_proc=8)
+```
+2. General cleansing pipeline:
+```python
+import re
+import html
+
+def remove_emojies(text):
+    # Ref: https://gist.github.com/Alex-Just/e86110836f3f93fe7932290526529cd1#gistcomment-3208085
+    # Ref: https://en.wikipedia.org/wiki/Unicode_block
+    EMOJI_PATTERN = re.compile(
+        "["
+        "\U0001F1E0-\U0001F1FF"  # flags (iOS)
+        "\U0001F300-\U0001F5FF"  # symbols & pictographs
+        "\U0001F600-\U0001F64F"  # emoticons
+        "\U0001F680-\U0001F6FF"  # transport & map symbols
+        "\U0001F700-\U0001F77F"  # alchemical symbols
+        "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
+        "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
+        "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
+        "\U0001FA00-\U0001FA6F"  # Chess Symbols
+        "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
+        "\U00002702-\U000027B0"  # Dingbats
+        "]"
+    )
+    text = re.sub(EMOJI_PATTERN, r' ', text)
+    return text
+
+def clean_transcripts(x):
+    cjk = "[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\uff00-\uffef\u2e80-\u2eff\u3000-\u303f\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
+    x = html.unescape(x)
+    x = remove_emojies(x)
+    dots = r'\.{3,}'
+    x = re.sub(rf'{dots}|…|\s|^|$', ' ', x)  # padding the ends with spaces lets boundary cases like " uh" match below
+    x = re.sub(rf"({cjk}|\s)([Uu][mh]|U[MH])({cjk}|\s)", r"\1 \3", x)  # uh/um surrounded by cjk or space
+    x = re.sub(r"([HhEe]mm+|[HE]MM+)", " ", x)  # hmm/emm
+    x = re.sub(fr"\*+({cjk}+|[A-Za-z]+)\*+", " ", x)  # stage directions like *叹气* ("*sigh*")
+    x = re.sub(r'[呃嗯]', ' ', x)  # filler syllables 呃/嗯
+    def replace_except(pattern, repl, z, excs):
+        for e, t in excs:
+            z = z.replace(e, t)
+        z = re.sub(pattern, repl, z)
+        for e, t in excs:
+            z = z.replace(t, e)
+        return z
+    # remove filler 恩 except in 感恩/恩桥/恩怨 (placeholders reuse tokens already scrubbed above)
+    x = replace_except("恩", ' ', x, excs=[("感恩", "呃"), ("恩桥", "嗯"), ("恩怨", "emm")])
+    # remove parenthesized asides, except e.g. 'Program Files (x86)'
+    x = re.sub(r'（[^（）]*）', ' ', x)  # fullwidth （...）
+    x = re.sub(r"\s+", " ", x)
+    x = replace_except(r'\([^()]*\)', ' ', x, excs=[("Program Files (x86)", "呃")])
+    puncs = r'[,?!。;？！，；~～]'
+    x = re.sub(rf'({puncs})(?:\s*\1)+', r'\1', x)  # "???" -> "?"
+    x = re.sub(rf"\s+({puncs})", r'\1', x)  # "text ," -> "text,"
+    sp_puncs = r'[?!,;]'  # puncs that should be followed by a space
+    x = re.sub(rf"({puncs}*{sp_puncs})([a-zA-Z])", r'\1 \2', x)  # "text,cont" -> "text, cont"
+    x = re.sub(rf"^[\s]*{puncs}+", "", x)  # leading puncs
+    x = re.sub(r"\s+", " ", x)  # excess spaces
+    return x.strip()
+
+audio_dataset_manual_clean = audio_dataset_manual.map(lambda x: {"text": list(map(clean_transcripts, x['text']))}, batched=True, num_proc=8)
+# push to hub
+audio_dataset_manual_clean.push_to_hub(
+    "georgechang8/code_switch_yodas_zh",
+    config_name="clean",
+    set_default=False,
+    commit_message="Clean transcript",
+    max_shard_size="1GB",
+    embed_external_files=True,
+)
+```
 
 ## Limitations
 
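As a spot check of the cleansing, the two configs can be compared row by row. A minimal sketch, assuming the `clean` config preserves row order (it was produced by a `map` over the same selection); `remove_columns("audio")` is used only to skip audio decoding:

```python
from datasets import load_dataset

raw = load_dataset("georgechang8/code_switch_yodas_zh", split="train").remove_columns("audio")
clean = load_dataset("georgechang8/code_switch_yodas_zh", "clean", split="train").remove_columns("audio")

# Rows should line up one-to-one; print the first few transcripts that changed.
shown = 0
for r, c in zip(raw, clean):
    assert r["id"] == c["id"]
    if r["text"] != c["text"]:
        print(r["id"])
        print("  raw:  ", r["text"])
        print("  clean:", c["text"])
        shown += 1
        if shown == 5:
            break
```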