  - split: train
    path: data/train-*
---

# Dataset Card for code-switching yodas

<!-- Provide a quick summary of the dataset. -->

This dataset is derived from espnet/yodas; more details can be found here: https://huggingface.co/datasets/espnet/yodas

It is a subset of the zh000 shard of the espnet/yodas dataset, focusing on videos that exhibit Mandarin-English code-switching.
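
The published subset can be loaded directly with the `datasets` library (a minimal usage sketch; the column names follow the publishing step described below):

```python
from datasets import load_dataset

# Load the code-switching subset; each row has audio, text, id, session_id
ds = load_dataset("georgechang8/code_switch_yodas_zh", split="train")
print(ds[0]["id"], ds[0]["text"])
```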

## Dataset Details

### Dataset Description

<!-- Provide a longer summary of what this dataset is. -->

- **Language(s):** Chinese, English
- **License:** CC-BY-3.0

### Dataset Sources

<!-- Provide the basic links for the dataset. -->

- **Repository:** https://huggingface.co/datasets/espnet/yodas

## Dataset Creation

#### Data Collection and Processing

<!-- This section describes the data collection and processing process such as data selection criteria, filtering and normalization methods, tools and libraries used, etc. -->

1. Read the text content of the clips in espnet/yodas
```python
import glob
import re
import pandas as pd
from pathlib import Path
from tqdm.auto import tqdm
from collections import defaultdict
from dataclasses import dataclass, asdict

@dataclass
class Video:
    name: str = ""       # 11-character video id
    shard: str = ""      # text shard the video belongs to
    duration: float = 0  # end timestamp of the last clip seen, in seconds
    content: str = ""    # concatenated transcript of all clips

data = defaultdict(Video)
trange = tqdm(glob.glob("yodas/data/zh000/text/*.txt"))
for file in trange:
    shard = Path(file).stem
    with open(file, "r", encoding="utf8") as f:
        # Each line: <video_id>-<5-digit index>-<8-digit start>-<8-digit end> <text>
        for m in re.finditer(r"(.{11})-\d{5}-\d{8}-(\d{8})\s+(.*)", f.read()):
            name = m.group(1)
            assert data[name].shard in ["", shard]
            data[name].shard = shard
            data[name].name = name
            data[name].duration = int(m.group(2)) / 100
            data[name].content += " " + m.group(3)
    trange.set_postfix(vids=len(data))

data_df = pd.DataFrame(map(asdict, data.values()))
```
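The regex above assumes clip names of the form `<video_id (11 chars)>-<index (5 digits)>-<start (8 digits)>-<end (8 digits)>`, with timestamps apparently in centiseconds (hence the division by 100). A quick check on a made-up line:

```python
# Hypothetical line, for illustration only
line = "AbCdEfGhIjK-00000-00000000-00012345 大家好 hello everyone"
m = re.search(r"(.{11})-\d{5}-\d{8}-(\d{8})\s+(.*)", line)
print(m.group(1))             # AbCdEfGhIjK (video id)
print(int(m.group(2)) / 100)  # 123.45 (clip end time in seconds)
print(m.group(3))             # 大家好 hello everyone (transcript)
```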
2. Retain videos containing Chinese characters
```python
import re

cjk_pattern = re.compile(
    # puncs \uff00-\uffef \u3000-\u303f
    r"[\u3400-\u4db5\u4e00-\u9fa5\u9fa6-\u9fbb\uf900-\ufa2d\ufa30-\ufa6a\ufa70-\ufad9\u2e80-\u2eff\u31c0-\u31ef\u2f00-\u2fdf\u2ff0-\u2fff\u3100-\u312f\u31a0-\u31bf\ufe10-\ufe1f\ufe30-\ufe4f\u2600-\u26ff\u2700-\u27bf\u3200-\u32ff\u3300-\u33ff]"
)
chinese_df = data_df[data_df['content'].apply(lambda x: cjk_pattern.search(x) is not None)]
```
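To illustrate the filter (illustrative strings, not taken from the dataset):

```python
print(cjk_pattern.search("hello world") is not None)  # False: no CJK characters
print(cjk_pattern.search("大家好 hello") is not None)   # True: contains CJK characters
```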
3. Filter out videos containing pinyin diacritics
```python
pinyin_pattern = re.compile(
    r'[üÜāáǎàōóǒòēéěèīíǐìūúǔùǖǘǚǜ]'
)
chinese_pin_df = chinese_df[chinese_df['content'].apply(lambda x: pinyin_pattern.search(x) is None)]
```
4. Retain videos containing Latin script
```python
az_pattern = re.compile(
    r"[a-zA-Z]+"
)
mixed_df = chinese_pin_df[chinese_pin_df['content'].apply(lambda x: az_pattern.search(x) is not None)]
```
5. Retain videos containing punctuation
```python
punc_pattern = re.compile(
    r'[!?。,、·.,?!]'
)
mixed_punc_df = mixed_df[mixed_df['content'].apply(lambda x: punc_pattern.search(x) is not None)]
```
6. Sort by increasing proportion of Chinese characters (videos with more Latin-script content come first)
```python
def cjk_ratio(x):
    return x.apply(lambda z: len(cjk_pattern.findall(z)) / len(z))

mixed_punc_df = mixed_punc_df.sort_values(by='content', key=cjk_ratio)
```
> This leaves around 1000 videos.

7. Save to CSV for manual inspection
```python
mixed_punc_df.to_csv('sanity.csv')
```
8. Manually inspect videos 0-500
    - NwRTR8mY-7A: mostly English
    - ASL3yEYC1IE, etc.: contains an English translation for each line
    - Recurring creators whose content is not good code-switching: "天天開心", "日向蓝子", "笑花兒", "关于麻将的职人", "大濕:", "朋友sisi", "please my hero", "金玲老師"
    - Exceptions to the previous rule were manually picked and added to the accepted list
    - Recurring creators whose content is good code-switching: "我是小夫", "久德電子", "GL_TECH"
    - Most videos about the U.S. stock market or tech reviews were accepted.

9. Quickly skim through videos 501-1000 (only 10 were picked)

> A total of 176 videos were picked in steps 8 & 9.

10. Extract the selected videos' audio clips
```python
import re
import tarfile
from pathlib import Path
from tqdm.auto import tqdm

with open("codeswitch.txt", "r") as f:  # list of 176 picked video_ids
    codeswitch = set(map(str.strip, f.readlines()))
code_switch_data = data_df[data_df['name'].apply(lambda x: x in codeswitch)]

# Group the selected video ids by the audio shard they live in
shard_names = {}
for name, shard in zip(
    code_switch_data['name'].tolist(),
    code_switch_data['shard'].tolist()
):
    if shard not in shard_names:
        shard_names[shard] = set()
    shard_names[shard].add(name)

def extract_wav_files(shard, output_dir):
    tar_file_path = f"yodas/data/zh000/audio/{shard}.tar.gz"
    names = shard_names[shard]

    # Open the tar.gz file
    with tarfile.open(tar_file_path, 'r:gz') as tar:
        # Iterate through the contents of the tar file
        for member in tar.getmembers():
            # Keep only clips that belong to one of the selected videos
            video_id = re.search(r"(.{11})-\d{5}-\d{8}-\d{8}", member.name)
            if video_id and video_id.group(1) in names:
                # Extract the WAV file contents into the output directory
                output_path = Path(output_dir, Path(member.name).name)
                with open(output_path, 'wb') as output_file:
                    output_file.write(tar.extractfile(member).read())

output_dir = "./code_switch_yodas"
Path(output_dir).mkdir(exist_ok=True, parents=True)
for shard in tqdm(shard_names):
    extract_wav_files(shard, output_dir)
```
11. Publish the subset
```python
import datasets
from datasets import Dataset

# clip_ids / texts: per-clip ids and transcripts of the selected videos
# (their construction is not shown in this snippet)
audio_dataset = Dataset.from_dict({
    "audio": [
        f"{output_dir}/{clip_id}.wav"
        for clip_id in clip_ids
    ],
    "text": texts,
    "id": clip_ids,
    "session_id": [x[:11] for x in clip_ids]
})
audio_dataset = audio_dataset.cast_column("audio", datasets.features.Audio(sampling_rate=16000))
audio_dataset = audio_dataset.sort("id")
audio_dataset.push_to_hub(
    "georgechang8/code_switch_yodas_zh",
    commit_message="Initial commit",
    embed_external_files=True
)
```

## Limitations

<!-- This section is meant to convey both technical and sociotechnical limitations. -->

1. The filtering and hand-picking process might have left out useful videos.
2. The transcriptions are not processed in any way, so they might need further cleansing (see the sketch below).
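
A minimal cleansing sketch (the normalization choices here are assumptions, not part of the original processing):

```python
import re

def clean_transcript(text: str) -> str:
    # Collapse runs of whitespace and trim (assumed normalization)
    text = re.sub(r"\s+", " ", text).strip()
    # Drop the space inserted between consecutive CJK characters by the
    # concatenation step (assumption; adjust the character range as needed)
    text = re.sub(r"(?<=[\u4e00-\u9fa5]) (?=[\u4e00-\u9fa5])", "", text)
    return text

print(clean_transcript("大家 好  hello   world"))  # -> 大家好 hello world
```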

## Dataset Card Contact

Original dataset: https://huggingface.co/datasets/espnet/yodas

CS processing: Chih-Chiang Chang (cc.chang0828@gmail.com)