File size: 3,636 Bytes
b71d238
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
04b3675
b71d238
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
# Lint as: python3
"""semantic and acoustic codes dataset with text.
"""


import glob
import os

import datasets
import torch


class TextSpeechCodesDatasetConfig(datasets.BuilderConfig):
    """BuilderConfig for Text-SpeechCodes dataset."""

    def __init__(self, **kwargs):
        super(TextSpeechCodesDatasetConfig, self).__init__(**kwargs)


class TextSpeechCodesDataset(datasets.GeneratorBasedBuilder):
    """Codes dataset."""

    BUILDER_CONFIGS = [
        TextSpeechCodesDatasetConfig(name="all", description="TextSpeechCodes dataset"),
    ]

    @property
    def manual_download_instructions(self):
        return (
            "Codes should be computed before using this dataset. "
            "`datasets.load_dataset('/path/to/this/script', name=all, data_dir='path/to/folder/folder_name/of/codes')`"
        )

    def _info(self):
        features = datasets.Features(
            {
                "id": datasets.Value("string"),
                "length": datasets.Value("int32"),
                "transcription": datasets.Value("string"),
                "acoustic_tokens": datasets.Array2D(shape=(None, 12), dtype="int16"),
                "semantic_tokens": datasets.Array2D(shape=(None, 1), dtype="int16"),
                "transcription_bytes": datasets.Sequence(datasets.Value("uint8")),
            }
        )

        return datasets.DatasetInfo(
            features=features,
        )

    def _split_generators(self, dl_manager):
        base_data_dir = os.path.abspath(os.path.expanduser(dl_manager.manual_dir))
        if not os.path.exists(base_data_dir):
            raise FileNotFoundError(
                f"{base_data_dir} does not exist. Make sure you insert a manual dir via "
                f"`datasets.load_dataset('/this/script', data_dir=...)` "
                f"that includes code files .pt files "
                f"dataset. Manual download instructions: {self.manual_download_instructions}"
            )

        train_data_dirs = glob.glob(os.path.join(base_data_dir, "**", "*.pt"), recursive=True)
        print(f"Found {len(train_data_dirs)} files")

        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"data_dirs": train_data_dirs},
            ),
        ]

    def _generate_examples(self, data_dirs):
        for key, path in enumerate(data_dirs):
            id_ = path.split("/")[-1].replace(".pt", "")

            data = torch.load(path, map_location="cpu", weights_only=False)
            for i, (k, v) in enumerate(data.items()):
                acoustic_tokens = v["acoustic_codes"]
                semantic_tokens = v["semantic_codes"]

                if acoustic_tokens.ndim == 3:
                    acoustic_tokens = acoustic_tokens.squeeze(0).transpose(0, 1)
                else:
                    acoustic_tokens = acoustic_tokens.transpose(0, 1)
                if semantic_tokens.ndim == 2:
                    semantic_tokens = semantic_tokens.transpose(0, 1)
                else:
                    semantic_tokens = semantic_tokens.unsqueeze(1)

                transcription = v["transcription"]
                transcription_bytes = list(transcription.encode("utf-8"))

                yield f"{id_}_{i}", {
                    "id": f"{id_}_{i}",
                    "length": semantic_tokens.shape[0] + len(transcription_bytes),
                    "transcription": transcription,
                    "transcription_bytes": transcription_bytes,
                    "acoustic_tokens": acoustic_tokens,
                    "semantic_tokens": semantic_tokens,
                }