"""test set"""

import csv
import os
import json

import datasets
from datasets.utils.py_utils import size_str
from tqdm import tqdm

_CITATION = """\
@inproceedings{panayotov2015librispeech,
    title={Librispeech: an ASR corpus based on public domain audio books},
    author={Panayotov, Vassil and Chen, Guoguo and Povey, Daniel and Khudanpur, Sanjeev},
    booktitle={Acoustics, Speech and Signal Processing (ICASSP), 2015 IEEE International Conference on},
    pages={5206--5210},
    year={2015},
    organization={IEEE}
}
"""

_DESCRIPTION = """\
Lorem ipsum
"""

_BASE_URL = "https://huggingface.co/datasets/shane062/FYP_Fine_Tuning"
_DATA_URL = "dataset/audio/test/"
_PROMPTS_URLS = {"test": "dataset/audio/test/test.csv"}

logger = datasets.logging.get_logger(__name__)


class TestConfig(datasets.BuilderConfig):
    """Lorem ipsum."""

    def __init__(self, name, **kwargs):
        description = (
            "Lorem ipsum dolor sit amet, consectetur adipiscing elit, sed do eiusmod tempor "
            "incididunt ut labore et dolore magna aliqua. Ut enim ad minim veniam, quis nostrud "
            "exercitation ullamco laboris nisi ut aliquip ex ea commodo consequat. Duis aute irure "
            "dolor in reprehenderit in voluptate velit esse cillum dolore eu fugiat nulla pariatur. "
            "Excepteur sint occaecat cupidatat non proident, sunt in culpa qui officia deserunt "
            "mollit anim id est laborum."
        )
        super().__init__(
            name=name,
            description=description,
            **kwargs,
        )


class TestASR(datasets.GeneratorBasedBuilder):
    """Lorem ipsum."""

    BUILDER_CONFIGS = [
        TestConfig(
            name="test-dataset",
        )
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "file_name": datasets.Value("string"),
                    "audio": datasets.Audio(sampling_rate=16_000),
                    "transcription": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage=_BASE_URL,
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        # Download the audio data and the prompts CSV; extract the audio only when
        # not streaming, since streaming reads files straight out of the archive.
        audio_path = dl_manager.download(_DATA_URL)
        local_extracted_archive = dl_manager.extract(audio_path) if not dl_manager.is_streaming else None
        meta_path = dl_manager.download(_PROMPTS_URLS)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "meta_path": meta_path["test"],
                    "audio_files": dl_manager.iter_archive(audio_path),
                    "local_extracted_archive": local_extracted_archive,
                },
            )
        ]

    def _generate_examples(self, meta_path, audio_files, local_extracted_archive):
        """Lorem ipsum."""
        # Build a lookup from audio file name to its metadata row; the prompts file
        # is read as tab-separated text with a header row.
        metadata = {}
        with open(meta_path, encoding="utf-8") as f:
            next(f)  # skip the header row
            for row in f:
                r = row.rstrip("\n").split("\t")
                file_name = r[0]
                transcription = r[1]
                metadata[file_name] = {
                    "file_name": file_name,
                    "transcription": transcription,
                }

        # Walk the audio archive and pair each file with its metadata.
        id_ = 0
        for path, f in audio_files:
            _, audio_name = os.path.split(path)
            if audio_name in metadata:
                result = dict(metadata[audio_name])
                path = os.path.join(local_extracted_archive, "test", path) if local_extracted_archive else path
                result["audio"] = {"path": path, "bytes": f.read()}
                yield id_, result
                id_ += 1
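

# Usage sketch (not part of the original script; the repo id, config name, and split
# below are taken from the constants and builder config above and may need adjusting
# to the actual Hub layout):
#
#   from datasets import load_dataset
#
#   ds = load_dataset("shane062/FYP_Fine_Tuning", "test-dataset", split="test")
#   print(ds[0]["file_name"], ds[0]["transcription"])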