Commit 1cce671
1 Parent(s): 2569588

add microset

peoples_speech.py (CHANGED, +28 -7)
@@ -61,16 +61,16 @@ _LICENSE = [
     "cc-by-sa-3.0", "cc-by-sa-4.0"
 ]
 
-
+_BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
 
 # relative path to data inside dataset's repo
-_DATA_URL = "{split}/{config}/{config}_{archive_id:06d}.tar"
+_DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"
 
 # relative path to file containing number of audio archives inside dataset's repo
-_N_FILES_URL = "{split}/{config}/n_files.txt"
+_N_FILES_URL = _BASE_URL + "{split}/{config}/n_files.txt"
 
 # relative path to metadata inside dataset's repo
-_MANIFEST_URL = "{split}/{config}.json"
+_MANIFEST_URL = _BASE_URL + "{split}/{config}.json"
 
 
 class PeoplesSpeech(datasets.GeneratorBasedBuilder):
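With _BASE_URL prepended, the three templates now resolve to absolute download URLs instead of repo-relative paths. A minimal illustration of how the template expands (the split, config, and archive_id values below are only examples):

    # Illustrative expansion of the _DATA_URL template introduced above.
    _BASE_URL = "https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/"
    _DATA_URL = _BASE_URL + "{split}/{config}/{config}_{archive_id:06d}.tar"

    # First archive of the clean training data; archive_id is zero-padded to six digits.
    print(_DATA_URL.format(split="train", config="clean", archive_id=0))
    # -> https://huggingface.co/datasets/MLCommons/peoples_speech/resolve/main/train/clean/clean_000000.tar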
@@ -78,7 +78,7 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
 
     VERSION = datasets.Version("1.1.0")
     BUILDER_CONFIGS = [
-
+        datasets.BuilderConfig(name="microset", version=VERSION, description="Small subset of clean data for example purposes."),
         datasets.BuilderConfig(name="clean", version=VERSION, description="Clean, CC-BY licensed subset."),
         datasets.BuilderConfig(name="dirty", version=VERSION, description="Dirty, CC-BY licensed subset."),
         datasets.BuilderConfig(name="clean_sa", version=VERSION, description="Clean, CC-BY-SA licensed subset."),
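The new microset config is selected by name like any other BuilderConfig. A hypothetical usage sketch, assuming the script is loaded through the MLCommons/peoples_speech repository on the Hub (repository id taken from _BASE_URL above):

    # Hypothetical usage: "microset" downloads a single clean training archive,
    # so it is the cheapest way to try the loader end to end.
    from datasets import load_dataset

    ds = load_dataset("MLCommons/peoples_speech", "microset", split="train", streaming=True)
    print(next(iter(ds)))  # first example of the demo subset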
@@ -107,12 +107,33 @@ class PeoplesSpeech(datasets.GeneratorBasedBuilder):
 
     def _get_n_files(self, dl_manager, split, config):
         n_files_url = _N_FILES_URL.format(split=split, config=config)
-
+        n_files_path = dl_manager.download_and_extract(n_files_url)
 
-        with open(
+        with open(n_files_path, encoding="utf-8") as f:
             return int(f.read().strip())
 
     def _split_generators(self, dl_manager):
+
+        if self.config.name == "microset":
+            # take only first data archive for demo purposes
+            url = [_DATA_URL.format(split="train", config="clean", archive_id=0)]
+            archive_path = dl_manager.download(url)
+            local_extracted_archive_path = dl_manager.extract(archive_path) if not dl_manager.is_streaming else [None]
+            manifest_url = _MANIFEST_URL.format(split="train", config="clean_000000")  # train/clean_000000.json
+            manifest_path = dl_manager.download_and_extract(manifest_url)
+
+            return [
+                datasets.SplitGenerator(
+                    name=datasets.Split.TRAIN,
+                    gen_kwargs={
+                        "local_extracted_archive_paths": local_extracted_archive_path,
+                        # use iter_archive here to access the files in the TAR archives:
+                        "archives": [dl_manager.iter_archive(path) for path in archive_path],
+                        "manifest_path": manifest_path,
+                    },
+                ),
+            ]
+
         n_files_train = self._get_n_files(dl_manager, split="train", config=self.config.name)
         n_files_dev = self._get_n_files(dl_manager, split="dev", config="dev")
         n_files_test = self._get_n_files(dl_manager, split="test", config="test")
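The gen_kwargs assembled above (local_extracted_archive_paths, archives from dl_manager.iter_archive, and manifest_path) match what the existing split generators pass, so _generate_examples, which this commit does not touch, can treat the microset like any other config. The following is only a rough sketch of how such a generator typically pairs a manifest with iter_archive output; the manifest layout and field names are assumptions, not taken from this diff:

    import json
    import os

    def _generate_examples_sketch(local_extracted_archive_paths, archives, manifest_path):
        # Assumed manifest layout: one JSON object per line, keyed by the audio file's
        # path inside the TAR archive (the field names here are hypothetical).
        meta = {}
        with open(manifest_path, encoding="utf-8") as f:
            for line in f:
                sample = json.loads(line)
                meta[sample["audio_document_id"]] = sample

        for local_root, archive in zip(local_extracted_archive_paths, archives):
            # dl_manager.iter_archive yields (path_inside_tar, file_object) pairs.
            for audio_path, audio_file in archive:
                if audio_path not in meta:
                    continue
                # In streaming mode local_root is None, so the raw bytes are used
                # instead of a path to a locally extracted file.
                full_path = os.path.join(local_root, audio_path) if local_root else audio_path
                yield audio_path, {
                    "id": audio_path,
                    "audio": {"path": full_path, "bytes": audio_file.read()},
                    "text": meta[audio_path].get("text", ""),  # assumed field
                }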