Quentin Lhoest committed
Commit 74e21ec
1 Parent(s): 0c17ffc

Release: 2.0.0


Commit from https://github.com/huggingface/datasets/commit/983f46ddae2f5b253db2b3c5691d38c75241cadb

Files changed (2):
  1. README.md +5 -3
  2. bnl_newspapers.py +51 -58
README.md CHANGED
@@ -16,7 +16,7 @@ licenses:
  - cc0-1-0
  multilinguality:
  - multilingual
- pretty_name: BnL newspapers
+ pretty_name: BnL Historical Newspapers
  size_categories:
  - 100K<n<1M
  source_datasets:
@@ -27,7 +27,7 @@ task_ids:
  - language-modeling
  ---

- # Dataset Card for BnL Newspapers
+ # Dataset Card for BnL Historical Newspapers

  ## Table of Contents
  - [Table of Contents](#table-of-contents)
@@ -96,7 +96,9 @@ An example instance from the datasets:
  'source': 'newspaper/luxwort/1853-03-23',
  'text': 'Asien. Eine neue Nedcrland-Post ist angekommen mil Nachrichten aus Calcutta bis zum 5. Febr.; Vom» vay, 12. Febr. ; Nangun und HongKong, 13. Jan. Die durch die letzte Post gebrachle Nachricht, der König von Ava sei durch seinen Bruder enlhronl worden, wird bestätigt. (K. Z.) Verantwortl. Herausgeber, F. Schümann.',
  'title': 'Asien.',
- 'url': 'http://www.eluxemburgensia.lu/webclient/DeliveryManager?pid=209701#panel:pp|issue:209701|article:DTL47'}
+ 'url': 'http://www.eluxemburgensia.lu/webclient/DeliveryManager?pid=209701#panel:pp|issue:209701|article:DTL47',
+ 'language': 'de'
+ }
  ```
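The new `language` column surfaces the per-article `dc:language` value on every example. A minimal usage sketch, assuming the canonical `bnl_newspapers` dataset id (the id and the snippet are illustrative, not part of this commit):

```python
from datasets import load_dataset

# Assumption: the script is published under the canonical id "bnl_newspapers";
# point load_dataset at a local copy of bnl_newspapers.py otherwise.
ds = load_dataset("bnl_newspapers", split="train")

example = ds[0]
print(example["title"], example["language"])  # e.g. "Asien." "de"
```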
bnl_newspapers.py CHANGED
@@ -14,9 +14,9 @@
  # limitations under the License.
  """Digitised historic newspapers from the BNL"""

+ import os
  import xml.etree.ElementTree as ET
  from datetime import datetime
- from pathlib import Path

  import datasets
  from datasets.tasks import LanguageModeling
@@ -113,78 +113,71 @@ These newspapers cover 38 years of news (1841-1878) and include 510,505 extracte
              }
          )
          return datasets.DatasetInfo(
-             # This is the description that will appear on the datasets page.
              description=_DESCRIPTION,
-             # This defines the different columns of the dataset and their types
-             features=features,  # Here we define them above because they are different between the two configurations
-             # If there's a common (input, target) tuple from the features,
-             # specify them here. They'll be used if as_supervised=True in
-             # builder.as_dataset.
-             supervised_keys=None,
-             # Homepage of the dataset for documentation
+             features=features,
              homepage=_HOMEPAGE,
-             # License for the dataset if available
              license=_LICENSE,
-             # Citation for the dataset
              citation=_CITATION,
              task_templates=[LanguageModeling(text_column="text")],
          )

      def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
          _URL = self.config.data_url
          data_dir = dl_manager.download_and_extract(_URL)
          return [
              datasets.SplitGenerator(
                  name=datasets.Split.TRAIN,
-                 # These kwargs will be passed to _generate_examples
                  gen_kwargs={
-                     "dirpath": data_dir,
+                     "paths": dl_manager.iter_files([data_dir]),
                  },
              ),
          ]

-     def _generate_examples(
-         self,
-         dirpath,
-     ):
-         """Yields examples as (key, example) tuples."""
-         ns = {
-             "": "http://www.openarchives.org/OAI/2.0/",
-             "xsi": "http://www.w3.org/2001/XMLSchema-instance",
-             "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/",
-             "dc": "http://purl.org/dc/elements/1.1/",
-             "dcterms": "http://purl.org/dc/terms/",
-         }
-         for id_, xml in enumerate(Path(dirpath).rglob("**/*.xml")):
-             tree = ET.parse(open(xml, encoding="utf-8"))
-             source = tree.find(".//dc:source", ns).text
-             ark_id = tree.find(".//dc:identifier", ns).text
-             ispartof = tree.find(".//dcterms:isPartOf", ns).text
-             date = tree.find(".//dc:date", ns).text
-             if date:
-                 date = datetime.strptime(date, "%Y-%m-%d")
-             publisher = tree.find(".//dc:publisher", ns)
-             if publisher is not None:
-                 publisher = publisher.text
-             hasversion = tree.find(".//dcterms:hasVersion", ns).text
-             description = tree.find(".//dc:description", ns).text
-             title = tree.find(".//dc:title", ns).text
-             article_type = tree.find(".//dc:type", ns).text
-             extent = tree.find(".//dcterms:extent", ns).text
-             language = tree.find(".//dc:language", ns)
-             if language is not None:
-                 language = language.text
-             yield id_, {
-                 "id": ark_id,
-                 "source": source,
-                 "url": hasversion,
-                 "title": title,
-                 "text": description,
-                 "pub_date": date,
-                 "publisher": publisher,
-                 "article_type": article_type,
-                 "extent": extent,
-                 "ispartof": ispartof,
-                 "language": language,
-             }
+     def _generate_examples(self, paths):
+         key = 0
+         for path in paths:
+             if os.path.basename(path).endswith(".xml"):
+                 data = parse_xml(path)
+                 yield key, data
+                 key += 1
+
+
+ def parse_xml(path):
+     ns = {
+         "": "http://www.openarchives.org/OAI/2.0/",
+         "xsi": "http://www.w3.org/2001/XMLSchema-instance",
+         "oai_dc": "http://www.openarchives.org/OAI/2.0/oai_dc/",
+         "dc": "http://purl.org/dc/elements/1.1/",
+         "dcterms": "http://purl.org/dc/terms/",
+     }
+     tree = ET.parse(path)
+     source = tree.find(".//dc:source", ns).text
+     ark_id = tree.find(".//dc:identifier", ns).text
+     ispartof = tree.find(".//dcterms:isPartOf", ns).text
+     date = tree.find(".//dc:date", ns).text
+     if date:
+         date = datetime.strptime(date, "%Y-%m-%d")
+     publisher = tree.find(".//dc:publisher", ns)
+     if publisher is not None:
+         publisher = publisher.text
+     hasversion = tree.find(".//dcterms:hasVersion", ns).text
+     description = tree.find(".//dc:description", ns).text
+     title = tree.find(".//dc:title", ns).text
+     article_type = tree.find(".//dc:type", ns).text
+     extent = tree.find(".//dcterms:extent", ns).text
+     language = tree.find(".//dc:language", ns)
+     if language is not None:
+         language = language.text
+     return {
+         "id": ark_id,
+         "source": source,
+         "url": hasversion,
+         "title": title,
+         "text": description,
+         "pub_date": date,
+         "publisher": publisher,
+         "article_type": article_type,
+         "extent": extent,
+         "ispartof": ispartof,
+         "language": language,
+     }
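Two notes on the refactor. Moving the parsing into a module-level `parse_xml` keeps `_generate_examples` a thin loop over file paths, and switching from `Path.rglob` to `dl_manager.iter_files` is the hook that lets the builder enumerate files without a local directory tree (e.g. in streaming mode). For illustration, a self-contained sketch of the namespace handling `parse_xml` relies on, run against a hypothetical trimmed-down Dublin Core record (element values are taken from the dataset card example; the record structure here is an assumption, not the exact BnL export format):

```python
import xml.etree.ElementTree as ET

# Hypothetical minimal record for illustration; the real BnL export wraps
# these elements in a full OAI-PMH envelope.
XML = """<record xmlns="http://www.openarchives.org/OAI/2.0/"
         xmlns:dc="http://purl.org/dc/elements/1.1/"
         xmlns:dcterms="http://purl.org/dc/terms/">
  <dc:source>newspaper/luxwort/1853-03-23</dc:source>
  <dc:title>Asien.</dc:title>
  <dc:date>1853-03-23</dc:date>
  <dc:language>de</dc:language>
  <dcterms:hasVersion>http://www.eluxemburgensia.lu/webclient/DeliveryManager?pid=209701</dcterms:hasVersion>
</record>"""

# Prefix map as in parse_xml; the empty-string key (the default namespace)
# is supported by ElementTree's find/findall since Python 3.8.
ns = {
    "": "http://www.openarchives.org/OAI/2.0/",
    "dc": "http://purl.org/dc/elements/1.1/",
    "dcterms": "http://purl.org/dc/terms/",
}

root = ET.fromstring(XML)
print(root.find(".//dc:title", ns).text)            # Asien.
print(root.find(".//dc:language", ns).text)         # de
print(root.find(".//dcterms:hasVersion", ns).text)  # the eluxemburgensia URL
```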