# Copyright 2023 Xiaomi Corporation (Author: Junbo Zhang) and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""speechocean762: an open-source non-native English speech corpus for pronunciation assessment."""

import json

import datasets

_CITATION = """\
@inproceedings{speechocean762,
  title={speechocean762: An Open-Source Non-native English Speech Corpus For Pronunciation Assessment},
  booktitle={Proc. Interspeech 2021},
  year=2021,
  author={Junbo Zhang, Zhiwen Zhang, Yongqing Wang, Zhiyong Yan, Qiong Song, Yukai Huang, Ke Li, Daniel Povey, Yujun Wang}
}
"""

_DESCRIPTION = """\
A free public dataset for the pronunciation-scoring task. The corpus consists of
5,000 English sentences read by non-native speakers whose mother tongue is Mandarin.
Half of the speakers are children and the other half are adults; age and gender
information is provided for each speaker. Five experts scored every utterance,
each scoring independently under the same metric to avoid subjective bias.
"""

_HOMEPAGE = "https://www.openslr.org/101/"

_VERSION = "1.2.0"

_DL_URL = (
    f"https://github.com/jimbozhang/speechocean762/archive/refs/tags/v{_VERSION}.tar.gz"
)

_LICENSE = "Attribution 4.0 International (CC BY 4.0)"


class Speechocean762(datasets.GeneratorBasedBuilder):
    """Dataset builder for the speechocean762 pronunciation-assessment corpus."""

    VERSION = datasets.Version("1.3.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="train", version=VERSION),
        datasets.BuilderConfig(name="test", version=VERSION),
    ]

    DEFAULT_CONFIG_NAME = "test"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "text": datasets.Value("string"),
                    "speaker": datasets.Value("string"),
                    "gender": datasets.Value("string"),
                    "age": datasets.Value("int16"),
                    # Utterance-level scores.
                    "accuracy": datasets.Value("int16"),
                    "fluency": datasets.Value("int16"),
                    "prosodic": datasets.Value("int16"),
                    "total": datasets.Value("int16"),
                    # Word-level annotations, including per-phone accuracy scores.
                    "words": datasets.Sequence(
                        feature={
                            "text": datasets.Value("string"),
                            "accuracy": datasets.Value("int16"),
                            "stress": datasets.Value("int16"),
                            "total": datasets.Value("int16"),
                            "phones": datasets.Sequence(datasets.Value("string")),
                            "phones-accuracy": datasets.Sequence(
                                datasets.Value("float32")
                            ),
                            "mispronunciations": datasets.Value("string"),
                        }
                    ),
                }
            ),
            homepage=_HOMEPAGE,
            license=_LICENSE,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download and extract the corpus archive once; both splits read from it.
        self.extracted_path = dl_manager.download_and_extract(_DL_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "filepath": f"{self.extracted_path}/speechocean762-{_VERSION}/train/all-info.json"
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": f"{self.extracted_path}/speechocean762-{_VERSION}/test/all-info.json"
                },
            ),
        ]

    def _generate_examples(self, filepath):
        # Each split's "all-info.json" maps an utterance id to its text, speaker
        # metadata, utterance-level scores, and word-level annotations.
        with open(filepath, encoding="utf-8") as f:
            for key, row in json.load(f).items():
                path = (
                    f"{self.extracted_path}/speechocean762-{_VERSION}/WAVE/"
                    f"SPEAKER{row['speaker']}/{key}.WAV"
                )
                yield key, {
                    "file": path,
                    "audio": path,
                    "text": row["text"],
                    "speaker": row["speaker"],
                    "gender": row["gender"],
                    "age": row["age"],
                    "accuracy": row["accuracy"],
                    "fluency": row["fluency"],
                    "prosodic": row["prosodic"],
                    "total": row["total"],
                    "words": row["words"],
                }