Datasets: mozilla-foundation/common_voice_11_0
Merge branch 'streaming' of https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0 into streaming

common_voice_11_0.py CHANGED (+73 -128)
@@ -23,6 +23,7 @@ import datasets
 import requests
 from datasets.utils.py_utils import size_str
 from huggingface_hub import HfApi, HfFolder
+from tqdm import tqdm
 
 from .languages import LANGUAGES
 from .release_stats import STATS
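Note: tqdm comes in for the progress bar around the transcript reader in the reworked _generate_examples below; the HfApi/HfFolder imports stay untouched even though the _log_download call that used them is removed in this diff.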
@@ -41,7 +42,22 @@ _HOMEPAGE = "https://commonvoice.mozilla.org/en/datasets"
 
 _LICENSE = "https://creativecommons.org/publicdomain/zero/1.0/"
 
-[…]
+_N_SHARDS = {
+    "ar": {
+        "train": 1,
+        "dev": 1,
+        "test": 1,
+        "other": 2,
+        "invalidated": 1,
+    }
+}
+
+# TODO: change "streaming" to "main" after merge!
+_BASE_URL = "https://huggingface.co/datasets/mozilla-foundation/common_voice_11_0/resolve/streaming/"
+
+_AUDIO_URL = _BASE_URL + "audio/{lang}/{split}/{lang}_{split}_{shard_idx}.tar"
+
+_TRANSCRIPT_URL = _BASE_URL + "transcript/{lang}/{split}.tsv"
 
 
 class CommonVoiceConfig(datasets.BuilderConfig):
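Note: these constants replace the Mozilla API download flow (deleted further down) with a sharded layout hosted directly on the Hub: audio clips are packed into per-split tar shards (counts in _N_SHARDS) and transcripts are one TSV per split. A minimal sketch of how the templates expand, using only the constants above (illustrative, not part of the script):

lang = "ar"
for split in ("train", "dev", "test", "other", "invalidated"):
    for shard_idx in range(_N_SHARDS[lang][split]):
        # e.g. .../audio/ar/other/ar_other_0.tar and .../audio/ar/other/ar_other_1.tar
        print(_AUDIO_URL.format(lang=lang, split=split, shard_idx=shard_idx))
print(_TRANSCRIPT_URL.format(lang=lang, split="train"))  # .../transcript/ar/train.tsv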
@@ -71,7 +87,7 @@ class CommonVoiceConfig(datasets.BuilderConfig):
 
 
 class CommonVoice(datasets.GeneratorBasedBuilder):
-    DEFAULT_CONFIG_NAME = "[…]
+    DEFAULT_CONFIG_NAME = "ar"
     DEFAULT_WRITER_BATCH_SIZE = 1000
 
     BUILDER_CONFIGS = [
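Note: the default config flips to "ar", matching the only locale that has shard counts in _N_SHARDS so far.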
@@ -86,7 +102,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
             total_hr=float(lang_stats["totalHrs"]) if lang_stats["totalHrs"] else None,
             size_bytes=int(lang_stats["size"]) if lang_stats["size"] else None,
         )
-        for lang, lang_stats in STATS["locales"].items()
+        for lang, lang_stats in STATS["locales"].items() if lang == "ar"
     ]
 
     def _info(self):
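Note: the added `if lang == "ar"` guard builds only the Arabic config from STATS["locales"] while the streaming layout is tried out on a single locale (cf. the TODO on _BASE_URL above). A toy illustration of the comprehension's effect, with a made-up STATS shape:

STATS = {"locales": {"ar": {}, "en": {}}}  # hypothetical; real stats come from release_stats.py
kept = [lang for lang, lang_stats in STATS["locales"].items() if lang == "ar"]
print(kept)  # ['ar'] -- every other locale is skipped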
@@ -121,140 +137,69 @@ class CommonVoice(datasets.GeneratorBasedBuilder):
             license=_LICENSE,
             citation=_CITATION,
             version=self.config.version,
-            # task_templates=[
-            #     AutomaticSpeechRecognition(audio_file_path_column="path", transcription_column="sentence")
-            # ],
         )
 
-    def _get_bundle_url(self, locale, url_template):
-        # path = encodeURIComponent(path)
-        path = url_template.replace("{locale}", locale)
-        path = urllib.parse.quote(path.encode("utf-8"), safe="~()*!.'")
-        # use_cdn = self.config.size_bytes < 20 * 1024 * 1024 * 1024
-        # response = requests.get(f"{_API_URL}/bucket/dataset/{path}/{use_cdn}", timeout=10.0).json()
-        response = requests.get(f"{_API_URL}/bucket/dataset/{path}", timeout=10.0).json()
-        return response["url"]
-
-    def _log_download(self, locale, bundle_version, auth_token):
-        if isinstance(auth_token, bool):
-            auth_token = HfFolder().get_token()
-        whoami = HfApi().whoami(auth_token)
-        email = whoami["email"] if "email" in whoami else ""
-        payload = {"email": email, "locale": locale, "dataset": bundle_version}
-        requests.post(f"{_API_URL}/{locale}/downloaders", json=payload).json()
-
     def _split_generators(self, dl_manager):
-        […]
-        )
-
-        […]
-        bundle_version = bundle_url_template.split("/")[0]
-        dl_manager.download_config.ignore_url_params = True
-
-        self._log_download(self.config.name, bundle_version, hf_auth_token)
-        archive_path = dl_manager.download(self._get_bundle_url(self.config.name, bundle_url_template))
-        local_extracted_archive = dl_manager.extract(archive_path) if not dl_manager.is_streaming else None
-
-        if self.config.version < datasets.Version("5.0.0"):
-            path_to_data = ""
-        else:
-            path_to_data = "/".join([bundle_version, self.config.name])
-        path_to_clips = "/".join([path_to_data, "clips"]) if path_to_data else "clips"
-
-        return [
-            datasets.SplitGenerator(
-                name=datasets.Split.TRAIN,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "train.tsv"]) if path_to_data else "train.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.TEST,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "test.tsv"]) if path_to_data else "test.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name=datasets.Split.VALIDATION,
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "dev.tsv"]) if path_to_data else "dev.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="other",
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "other.tsv"]) if path_to_data else "other.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-            datasets.SplitGenerator(
-                name="invalidated",
-                gen_kwargs={
-                    "local_extracted_archive": local_extracted_archive,
-                    "archive_iterator": dl_manager.iter_archive(archive_path),
-                    "metadata_filepath": "/".join([path_to_data, "invalidated.tsv"])
-                    if path_to_data
-                    else "invalidated.tsv",
-                    "path_to_clips": path_to_clips,
-                },
-            ),
-        ]
+        lang = self.config.name
+        audio_urls = {}
+        splits = ("train", "dev", "test", "other", "invalidated")
+        for split in splits:
+            audio_urls[split] = [
+                _AUDIO_URL.format(lang=lang, split=split, shard_idx=i) for i in range(_N_SHARDS[lang][split])
+            ]
+        archive_paths = dl_manager.download(audio_urls)
+        local_extracted_archive_paths = dl_manager.extract(archive_paths) if not dl_manager.is_streaming else {}
+
+        meta_urls = {split: _TRANSCRIPT_URL.format(lang=lang, split=split) for split in splits}
+        meta_paths = dl_manager.download_and_extract(meta_urls)
+
+        split_generators = []
+        split_names = {
+            "train": datasets.Split.TRAIN,
+            "dev": datasets.Split.VALIDATION,
+            "test": datasets.Split.TEST,
+        }
+        for split in splits:
+            split_generators.append(
+                datasets.SplitGenerator(
+                    name=split_names.get(split, split),
+                    gen_kwargs={
+                        "local_extracted_archive_paths": local_extracted_archive_paths.get(split),
+                        "archives": [dl_manager.iter_archive(path) for path in archive_paths.get(split)],
+                        "meta_path": meta_paths[split],
+                    },
+                ),
+            )
+
+        return split_generators
 
-    def _generate_examples(
-        self,
-        local_extracted_archive,
-        archive_iterator,
-        metadata_filepath,
-        path_to_clips,
-    ):
-        """Yields examples."""
+    def _generate_examples(self, local_extracted_archive_paths, archives, meta_path):
         data_fields = list(self._info().features.keys())
         metadata = {}
-        […]
-                    metadata[row["path"]] = row
-            elif path.startswith(path_to_clips):
-                assert metadata_found, "Found audio clips before the metadata TSV file."
-                if not metadata:
-                    break
-                if path in metadata:
-                    result = dict(metadata[path])
+        with open(meta_path, encoding="utf-8") as f:
+            reader = csv.DictReader(f, delimiter="\t", quoting=csv.QUOTE_NONE)
+            for row in tqdm(reader, desc="Reading metadata..."):
+                if not row["path"].endswith(".mp3"):
+                    row["path"] += ".mp3"
+                # accent -> accents in CV 8.0
+                if "accents" in row:
+                    row["accent"] = row["accents"]
+                    del row["accents"]
+                # if data is incomplete, fill with empty values
+                for field in data_fields:
+                    if field not in row:
+                        row[field] = ""
+                metadata[row["path"]] = row
+
+        for i, audio_archive in enumerate(archives):
+            for filename, file in audio_archive:
+                _, filename = os.path.split(filename)
+                if filename in metadata:
+                    result = dict(metadata[filename])
                     # set the audio feature and the path to the extracted file
-                    path = os.path.join(local_extracted_archive, path) if local_extracted_archive else path
-                    result["audio"] = {"path": path, "bytes": f.read()}
+                    path = os.path.join(local_extracted_archive_paths[i], filename) if local_extracted_archive_paths else filename
+                    result["audio"] = {"path": path, "bytes": file.read()}
                     # set path to None if the audio file doesn't exist locally (i.e. in streaming mode)
-                    result["path"] = path if local_extracted_archive else None
+                    result["path"] = path if local_extracted_archive_paths else filename
 
                     yield path, result
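Note: end to end, _split_generators now downloads (or, in streaming mode, just opens) the tar shards and per-split TSVs from the Hub, and _generate_examples joins transcript rows to clips by filename while reading audio bytes straight out of the archives via dl_manager.iter_archive. A minimal usage sketch, assuming access to the gated dataset has already been granted on the Hub (the config name and field names come from the script above):

from datasets import load_dataset

cv_ar = load_dataset(
    "mozilla-foundation/common_voice_11_0",
    "ar",
    split="train",
    streaming=True,  # no extraction: local_extracted_archive_paths stays empty
    use_auth_token=True,
)
sample = next(iter(cv_ar))
print(sample["sentence"])  # transcript text
print(sample["path"])      # clip filename; raw audio bytes live under sample["audio"]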