Local paths in common voice (#3736)

* Merge generators for local files and streaming
* Add the streaming parameter to _split_generators
* Update common_voice
* Address Patrick's comments:
  - pass streaming to _generate_examples
  - separate it into two methods
* Add an is_streaming attribute to the dl managers
* Revert the streaming parameter being passed to _split_generators

Co-authored-by: anton-l <aglozhkov@gmail.com>
Commit from https://github.com/huggingface/datasets/commit/e3c8e2541573b42b8dc23a4a29e197537d309bca

Files changed: common_voice.py (+93, -17)
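For context on how the two code paths introduced below get exercised, here is a minimal usage sketch, assuming the script is loaded through datasets.load_dataset with the "tr" config as an example (streaming=True goes through the new iter_archive-based generator, the default call through local extraction):

from datasets import load_dataset

# Non-streaming: the archive is downloaded and extracted locally, and each
# example's 'path' points to an mp3 file on disk.
cv_local = load_dataset("common_voice", "tr", split="train")

# Streaming: the TAR archive is iterated file by file, so audio comes back
# as raw bytes and no local path is materialized.
cv_streamed = load_dataset("common_voice", "tr", split="train", streaming=True)
first_example = next(iter(cv_streamed))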
common_voice.py  (CHANGED)

@@ -15,6 +15,8 @@
""" Common Voice Dataset"""


+import os
+
import datasets
from datasets.tasks import AutomaticSpeechRecognition

@@ -657,63 +659,135 @@ class CommonVoice(datasets.GeneratorBasedBuilder):

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
+        streaming = dl_manager.is_streaming
+        archive_path = dl_manager.download(_DATA_URL.format(self.config.name))
+        if streaming:
+            # Here we use iter_archive in streaming mode because dl_manager.download_and_extract
+            # doesn't work to stream TAR archives (we have to stream the files in the archive one by one).
+            #
+            # The iter_archive method returns an iterable of (path_within_archive, file_obj) for every
+            # file in the TAR archive.
+            #
+            archive_iterator = dl_manager.iter_archive(archive_path)
+            # we locate the data using the path within the archive
+            path_to_data = "/".join(["cv-corpus-6.1-2020-12-11", self.config.name])
+            path_to_clips = "/".join([path_to_data, "clips"])
+            metadata_filepaths = {
+                split: "/".join([path_to_data, f"{split}.tsv"])
+                for split in ["train", "test", "dev", "other", "validated", "invalidated"]
+            }
+        else:
+            # In non-streaming we can extract the archive locally as usual
+            extracted_dir = dl_manager.extract(archive_path)
+            archive_iterator = None
+            # we locate the data using the local path
+            path_to_data = os.path.join(extracted_dir, "cv-corpus-6.1-2020-12-11", self.config.name)
+            path_to_clips = os.path.join(path_to_data, "clips")
+            metadata_filepaths = {
+                split: os.path.join(path_to_data, f"{split}.tsv")
+                for split in ["train", "test", "dev", "other", "validated", "invalidated"]
+            }

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["train"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["test"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["dev"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name="other",
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["other"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name="validated",
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["validated"],
                    "path_to_clips": path_to_clips,
                },
            ),
            datasets.SplitGenerator(
                name="invalidated",
                gen_kwargs={
+                    "streaming": streaming,
+                    "archive_iterator": archive_iterator,
+                    "filepath": metadata_filepaths["invalidated"],
                    "path_to_clips": path_to_clips,
                },
            ),
        ]

+    def _generate_examples(self, streaming, archive_iterator, filepath, path_to_clips):
        """Yields examples."""
+        if streaming:
+            yield from self._generate_examples_streaming(archive_iterator, filepath, path_to_clips)
+        else:
+            yield from self._generate_examples_non_streaming(filepath, path_to_clips)
+
+    def _generate_examples_non_streaming(self, filepath, path_to_clips):
+
+        data_fields = list(self._info().features.keys())
+
+        # audio is not a header of the csv files
+        data_fields.remove("audio")
+        path_idx = data_fields.index("path")
+
+        with open(filepath, encoding="utf-8") as f:
+            lines = f.readlines()
+            headline = lines[0]
+
+            column_names = headline.strip().split("\t")
+            assert (
+                column_names == data_fields
+            ), f"The file should have {data_fields} as column names, but has {column_names}"
+
+            for id_, line in enumerate(lines[1:]):
+                field_values = line.strip().split("\t")
+
+                # set absolute path for mp3 audio file
+                field_values[path_idx] = os.path.join(path_to_clips, field_values[path_idx])
+
+                # if data is incomplete, fill with empty values
+                if len(field_values) < len(data_fields):
+                    field_values += (len(data_fields) - len(field_values)) * ["''"]
+
+                result = {key: value for key, value in zip(data_fields, field_values)}
+
+                # set audio feature
+                result["audio"] = field_values[path_idx]
+
+                yield id_, result
+
+    def _generate_examples_streaming(self, archive_iterator, filepath, path_to_clips):
+        """Yields examples in streaming mode."""
        data_fields = list(self._info().features.keys())

        # audio is not a header of the csv files
@@ -722,7 +796,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):

        all_field_values = {}
        metadata_found = False
+        for path, f in archive_iterator:
            if path == filepath:
                metadata_found = True
                lines = f.readlines()
@@ -752,5 +826,7 @@ class CommonVoice(datasets.GeneratorBasedBuilder):

                    # set audio feature
                    result["audio"] = {"path": path, "bytes": f.read()}
+                    # set path to None since the path doesn't exist locally in streaming mode
+                    result["path"] = None

                    yield path, result
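As the inline comments note, the streaming path relies on the archive being exposed as (path_within_archive, file_obj) pairs. A rough stand-in for that contract, assuming a local TAR archive at an illustrative path and a hypothetical helper iter_tar (not part of the datasets library), looks like this:

import tarfile

def iter_tar(archive_path):
    # Illustrative stand-in for dl_manager.iter_archive: yield
    # (path_within_archive, file_obj) for every regular file, in archive
    # order, without extracting anything to disk.
    with tarfile.open(archive_path) as tar:
        for member in tar:
            if member.isfile():
                yield member.name, tar.extractfile(member)

# Hypothetical local archive path, for illustration only.
for path, f in iter_tar("path/to/common_voice_subset.tar.gz"):
    if path.endswith(".tsv"):
        lines = f.readlines()   # metadata rows, read as in the generator above
    elif path.endswith(".mp3"):
        audio_bytes = f.read()  # raw audio bytes, as stored in result["audio"]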