# Copyright 2022 The HuggingFace Datasets Authors and Dan Saattrup Nielsen.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python build script for the ScandiWiki dataset."""


import json
from pathlib import Path
from typing import List

from datasets import Version
from datasets.builder import BuilderConfig, GeneratorBasedBuilder
from datasets.download import DownloadManager
from datasets.features import Features, Value
from datasets.info import DatasetInfo
from datasets.splits import SplitGenerator

_DESCRIPTION = """
ScandiWiki is a parsed and deduplicated version of the Danish, Norwegian Bokmål,
Norwegian Nynorsk, Swedish, Icelandic and Faroese Wikipedia corpora, as of January
2023.
"""

_LICENSE = "CC BY-SA 4.0"
_BASE_URL = (
    "https://huggingface.co/datasets/alexandrainst/scandi-wiki/resolve/main/data"
)

# _CITATION = """
# @InProceedings{huggingface:dataset,
# title = {ScandiWiki: A Scandinavian Wikipedia Dump},
# author={Dan Saattrup Nielsen},
# year={2022}
# }
# """


class ScandiWiki(GeneratorBasedBuilder):
    """Scandinavian part of Wikipedia."""

    VERSION = Version("1.0.1")

    BUILDER_CONFIGS = [
        BuilderConfig(
            name="da",
            version=VERSION,
            description="The deduplicated Danish part of Wikipedia.",
        ),
        BuilderConfig(
            name="sv",
            version=VERSION,
            description="The deduplicated Swedish part of Wikipedia.",
        ),
        BuilderConfig(
            name="nb",
            version=VERSION,
            description="The deduplicated Norwegian Bokmål part of Wikipedia.",
        ),
        BuilderConfig(
            name="nn",
            version=VERSION,
            description="The deduplicated Norwegian Nynorsk part of Wikipedia.",
        ),
        BuilderConfig(
            name="is",
            version=VERSION,
            description="The deduplicated Icelandic part of Wikipedia.",
        ),
        BuilderConfig(
            name="fo",
            version=VERSION,
            description="The deduplicated Faroese part of Wikipedia.",
        ),
    ]

    def _info(self) -> DatasetInfo:
        features = Features(
            {
                "id": Value("string"),
                "url": Value("string"),
                "title": Value("string"),
                "text": Value("string"),
            }
        )
        return DatasetInfo(
            description=_DESCRIPTION,
            features=features,
            license=_LICENSE,
            # homepage=_HOMEPAGE,
            # citation=_CITATION,
        )

    def _split_generators(self, dl_manager: DownloadManager) -> List[SplitGenerator]:
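        # Each language configuration is shipped as a single JSONL file under
        # _BASE_URL, exposed as a single "train" split.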
        url = f"{_BASE_URL}/{self.config.name}.jsonl"
        downloaded_file = dl_manager.download_and_extract(url)
        return [
            SplitGenerator(
                name="train",
                gen_kwargs=dict(filepath=downloaded_file),
            ),
        ]

    def _generate_examples(self, filepath: str):
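        # Each line of the JSONL file is a JSON object whose keys match the
        # declared features (id, url, title, text), so it can be yielded as-is.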
        with Path(filepath).open(encoding="utf-8") as f:
            for key, row in enumerate(f):
                data = json.loads(row)
                yield key, data
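

# A minimal usage sketch (not part of the builder itself), assuming this script
# is hosted in the `alexandrainst/scandi-wiki` dataset repository referenced by
# _BASE_URL above:
#
#     from datasets import load_dataset
#
#     dataset = load_dataset("alexandrainst/scandi-wiki", "da", split="train")
#     print(dataset[0]["title"])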