Datasets:

Modalities:
Text
Formats:
parquet
ArXiv:
Libraries:
Datasets
pandas
License:
albertvillanova HF staff committed on
Commit
a6b15b4
1 Parent(s): ca7c9b3

Update loading script

Browse files
Files changed (1) hide show
  1. wiki_lingua.py +34 -56
wiki_lingua.py CHANGED
@@ -15,7 +15,7 @@
15
  """TODO: Add a description here."""
16
 
17
 
18
- import pickle
19
 
20
  import datasets
21
 
@@ -43,27 +43,28 @@ _HOMEPAGE = "https://github.com/esdurmus/Wikilingua"
43
 
44
  _LICENSE = "CC BY-NC-SA 3.0"
45
 
46
- # Download links
47
- _URLs = {
48
- "arabic": "https://drive.google.com/uc?export=download&id=1__EjA6oZsgXQpggPm-h54jZu3kP6Y6zu",
49
- "chinese": "https://drive.google.com/uc?export=download&id=1TuWH7uwu6V90QWmZn25qhou1rm97Egmn",
50
- "czech": "https://drive.google.com/uc?export=download&id=1GcUN6mytEcOMBBOvjJOQzBmEkc-LdgQg",
51
- "dutch": "https://drive.google.com/uc?export=download&id=1-w-0uqaC6hnRn1F_3XqJEvi09zlcTIhX",
52
- "english": "https://drive.google.com/uc?export=download&id=11wMGqNVSwwk6zUnDaJEgm3qT71kAHeff",
53
- "french": "https://drive.google.com/uc?export=download&id=1Uit4Og1pk-br_0UJIO5sdhApyhTuHzqo",
54
- "german": "https://drive.google.com/uc?export=download&id=1meSNZHxd_0TZLKCRCYGN-Ke3IA5c1qOE",
55
- "hindi": "https://drive.google.com/uc?export=download&id=1ZyFGufe4puX3vjGPbp4xg9Hca3Gwq22g",
56
- "indonesian": "https://drive.google.com/uc?export=download&id=1PGa8j1_IqxiGTc3SU6NMB38sAzxCPS34",
57
- "italian": "https://drive.google.com/uc?export=download&id=1okwGJiOZmTpNRNgJLCnjFF4Q0H1z4l6_",
58
- "japanese": "https://drive.google.com/uc?export=download&id=1Z2ty5hU0tIGRZRDlFQZLO7b5vijRfvo0",
59
- "korean": "https://drive.google.com/uc?export=download&id=1cqu_YAgvlyVSzzjcUyP1Cz7q0k8Pw7vN",
60
- "portuguese": "https://drive.google.com/uc?export=download&id=1GTHUJxxmjLmG2lnF9dwRgIDRFZaOY3-F",
61
- "russian": "https://drive.google.com/uc?export=download&id=1fUR3MqJ8jTMka6owA0S-Fe6aHmiophc_",
62
- "spanish": "https://drive.google.com/uc?export=download&id=1KtMDsoYNukGP89PLujQTGVgt37cOARs5",
63
- "thai": "https://drive.google.com/uc?export=download&id=1QsV8C5EPJrQl37mwva_5-IJOrCaOi2tH",
64
- "turkish": "https://drive.google.com/uc?export=download&id=1M1M5yIOyjKWGprc3LUeVVwxgKXxgpqxm",
65
- "vietnamese": "https://drive.google.com/uc?export=download&id=17FGi8KI9N9SuGe7elM8qU8_3fx4sfgTr",
66
- }
 
67
 
68
 
69
  class WikiLingua(datasets.GeneratorBasedBuilder):
@@ -83,32 +84,12 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
83
  # data = datasets.load_dataset('my_dataset', 'first_domain')
84
  # data = datasets.load_dataset('my_dataset', 'second_domain')
85
  BUILDER_CONFIGS = [
86
- datasets.BuilderConfig(name="arabic", version=VERSION, description="A subset of article-summary in Arabic"),
87
- datasets.BuilderConfig(name="chinese", version=VERSION, description="A subset of article-summary in Chinese"),
88
- datasets.BuilderConfig(name="czech", version=VERSION, description="A subset of article-summary in Czech"),
89
- datasets.BuilderConfig(name="dutch", version=VERSION, description="A subset of article-summary in Dutch"),
90
- datasets.BuilderConfig(name="english", version=VERSION, description="A subset of article-summary in English"),
91
- datasets.BuilderConfig(name="french", version=VERSION, description="A subset of article-summary in French"),
92
- datasets.BuilderConfig(name="german", version=VERSION, description="A subset of article-summary in German"),
93
- datasets.BuilderConfig(name="hindi", version=VERSION, description="A subset of article-summary in Hindi"),
94
- datasets.BuilderConfig(
95
- name="indonesian", version=VERSION, description="A subset of article-summary in Indonesian"
96
- ),
97
- datasets.BuilderConfig(name="italian", version=VERSION, description="A subset of article-summary in Italian"),
98
- datasets.BuilderConfig(
99
- name="japanese", version=VERSION, description="A subset of article-summary in Japanese"
100
- ),
101
- datasets.BuilderConfig(name="korean", version=VERSION, description="A subset of article-summary in Korean"),
102
  datasets.BuilderConfig(
103
- name="portuguese", version=VERSION, description="A subset of article-summary in Portuguese"
104
- ),
105
- datasets.BuilderConfig(name="russian", version=VERSION, description="A subset of article-summary in Russian"),
106
- datasets.BuilderConfig(name="spanish", version=VERSION, description="A subset of article-summary in Spanish"),
107
- datasets.BuilderConfig(name="thai", version=VERSION, description="A subset of article-summary in Thai"),
108
- datasets.BuilderConfig(name="turkish", version=VERSION, description="A subset of article-summary in Turkish"),
109
- datasets.BuilderConfig(
110
- name="vietnamese", version=VERSION, description="A subset of article-summary in Vietnamese"
111
- ),
112
  ]
113
 
114
  DEFAULT_CONFIG_NAME = "english"
@@ -162,16 +143,13 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
162
 
163
  def _split_generators(self, dl_manager):
164
  """Returns SplitGenerators."""
165
- my_urls = _URLs[self.config.name]
166
- # See create_dummy.py to create new dummy data
167
- train_fname = dl_manager.download_and_extract(my_urls)
168
  return [
169
  datasets.SplitGenerator(
170
  name=datasets.Split.TRAIN,
171
  # These kwargs will be passed to _generate_examples
172
  gen_kwargs={
173
- "filepath": train_fname,
174
- "split": "train",
175
  },
176
  ),
177
  ]
@@ -189,9 +167,9 @@ class WikiLingua(datasets.GeneratorBasedBuilder):
189
 
190
  return processed_article
191
 
192
- def _generate_examples(self, filepath, split):
193
  """Yields examples."""
194
  with open(filepath, "rb") as f:
195
- data = pickle.load(f)
196
- for id_, row in enumerate(data.items()):
197
- yield id_, {"url": row[0], "article": self._process_article(row[1])}
 
15
  """TODO: Add a description here."""
16
 
17
 
18
+ import json
19
 
20
  import datasets
21
 
 
43
 
44
  _LICENSE = "CC BY-NC-SA 3.0"
45
 
46
+ # Download link
47
+ _URL = "data/{language}.jsonl.gz"
48
+ _LANGUAGES = [
49
+ "arabic",
50
+ "chinese",
51
+ "czech",
52
+ "dutch",
53
+ "english",
54
+ "french",
55
+ "german",
56
+ "hindi",
57
+ "indonesian",
58
+ "italian",
59
+ "japanese",
60
+ "korean",
61
+ "portuguese",
62
+ "russian",
63
+ "spanish",
64
+ "thai",
65
+ "turkish",
66
+ "vietnamese",
67
+ ]
68
 
69
 
70
  class WikiLingua(datasets.GeneratorBasedBuilder):
 
84
  # data = datasets.load_dataset('my_dataset', 'first_domain')
85
  # data = datasets.load_dataset('my_dataset', 'second_domain')
86
  BUILDER_CONFIGS = [
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
87
  datasets.BuilderConfig(
88
+ name=lang,
89
+ version=datasets.Version("1.1.1"),
90
+ description=f"A subset of article-summary in {lang.capitalize()}",
91
+ )
92
+ for lang in _LANGUAGES
 
 
 
 
93
  ]
94
 
95
  DEFAULT_CONFIG_NAME = "english"
 
143
 
144
  def _split_generators(self, dl_manager):
145
  """Returns SplitGenerators."""
146
+ filepath = dl_manager.download_and_extract(_URL.format(language=self.config.name))
 
 
147
  return [
148
  datasets.SplitGenerator(
149
  name=datasets.Split.TRAIN,
150
  # These kwargs will be passed to _generate_examples
151
  gen_kwargs={
152
+ "filepath": filepath,
 
153
  },
154
  ),
155
  ]
 
167
 
168
  return processed_article
169
 
170
+ def _generate_examples(self, filepath):
171
  """Yields examples."""
172
  with open(filepath, "rb") as f:
173
+ for id_, line in enumerate(f):
174
+ row = json.loads(line)
175
+ yield id_, {"url": row["url"], "article": self._process_article(row["article"])}