Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, Dask
Convert dataset to Parquet #6
opened by albertvillanova (HF staff)
README.md CHANGED
@@ -71,8 +71,17 @@ dataset_info:
  - name: validation
    num_bytes: 1211859418
    num_examples: 3461
- download_size: 192528922
+ download_size: 3232805701
  dataset_size: 16315602701
+ configs:
+ - config_name: default
+   data_files:
+   - split: train
+     path: data/train-*
+   - split: test
+     path: data/test-*
+   - split: validation
+     path: data/validation-*
  ---

  # Dataset Card for Narrative QA
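With the `configs` block added above, the `datasets` library maps each split to its Parquet shards directly from the dataset card, so no loading script is needed anymore. A minimal sketch (the repo id `narrativeqa` is an assumption here; substitute the actual Hub path):

```python
from datasets import load_dataset

# Repo id "narrativeqa" is an assumption -- replace with the actual Hub path.
# With the configs/data_files mapping in the README, this reads the Parquet
# shards directly instead of running a loading script.
ds = load_dataset("narrativeqa")

print(ds)  # DatasetDict with train, test, and validation splits
```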
data/test-00000-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:638fe830caab599fd40333b5c6783a83e12e431de35c9dde730d329596456f66
+ size 8559262
data/test-00001-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:51acc457d81b38e4cfb048436a239fe415aafa9c559577456720774c0cc5b407
+ size 44507679
data/test-00002-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:83f7672070ca24edb5a2844271cfbb18f351fc1fd84be945411a6814a13eb2e3
+ size 101411128
data/test-00003-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2b305c1ac27799d710172855265ba665b8fc258430a97a7994d976414db03021
+ size 221739348
data/test-00004-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c69949be3fe1d57f1171f929f18646eb99410784162b15632e778085a15abb79
+ size 60842742
data/test-00005-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38b87f8c0a4f0dc139808693bd019e2d0eb3a193f0b52fd52977b20b61f793c4
+ size 121182898
data/test-00006-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d0c894370b0cd1462582e89d39924a4e66db5caf69fa8d9ecc75e80284e7091a
+ size 243201775
data/test-00007-of-00008.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c9eddfda2441b23a7828886c3dad94986e693b989bf663a324c69c87870f2c3b
+ size 58520238
data/train-00000-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c503bf6f9dc99f624cf23d592cbab393bcf8ec83ae72ee8bda9fe78dc975684a
+ size 9799947
data/train-00001-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d6916659f26644945dcdbcba0cb153af8ce42c2482b6e6ee455af8a03aae0287
+ size 67193596
data/train-00002-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d876d0df116532d476e2cf9b0a6f22d3844b9a2837a66ff431fe5473b9651acb
+ size 232543098
data/train-00003-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:beef361d669198b2ca60b3906df894474bfc29ab86e3ed5b901deaf818bc1034
+ size 27237144
data/train-00004-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66703341abb91e44300106d7fd0c87e9a4d86b1e6bbd33ebe855fc557f73126f
+ size 88349123
data/train-00005-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:162781ba93739daad62786144572cd8ff3c77b63ca1a1c0c788c02d74548aeba
+ size 206395732
data/train-00006-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6a7d3c25d3185c3c0df8272664494fbc53b78e5c79f53efc4bf620aee36a9495
+ size 39407269
data/train-00007-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a33131a90ca1de261125065d66f6a2bd9a4403f34b3d2d525e77488ad905f54d
+ size 132425662
data/train-00008-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:207b66118655892930c495da7b7a036c03d167b57b248a7c27bdfd937e1be29c
+ size 10295374
data/train-00009-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:4b36dc10fd6a1f2d66ca3b26705c1a98ceb0c7c8ea0c99e91b168a59fd86d357
+ size 49453135
data/train-00010-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:abc135b05617d6e627197b2d43715a7e8f1ea2262d750c9b2d2b23c152e841f2
+ size 125852344
data/train-00011-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:674c0876d1f2f60ef1f3b8f0d7d48d0f731baefd59d12c4b2b7ab715a5ad301f
+ size 13545900
data/train-00012-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:66253913e5c161d37eef2d5212e54ae26f63ddd73dac1966dd3a55800e16a516
+ size 105091430
data/train-00013-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edfb135333a47a9c7a307af8d01474a598bf2db307e16b9be026f375ed972363
+ size 135517273
data/train-00014-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67c5b1dcc311b2a12c102f32d2f49758a97d7928cb578527cf3df2242e3c6e5c
+ size 34954998
data/train-00015-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3317553c52f6093ee3a36b94861d9de5c4b34d2adf4ebbd38fb0072974b6125c
+ size 73411063
data/train-00016-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1eda7b63549691eaffb613afbb5f87f1f65a74f4562546e9f6e4afb86c2e62b8
+ size 193825786
data/train-00017-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f0b1b763c5b7fbed56b6a264cee4b2bff4ad4a84abe6fdee3282eca434cbddd
+ size 61561710
data/train-00018-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:48183a438c60d8fe80a1fab74f871db3a3ec2de3799607aaa791940272acab99
+ size 106649283
data/train-00019-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9edd6ce0ddb59e9e71424d2441a22ea5afdc64459efb3337e54d2096589f86a7
+ size 194924664
data/train-00020-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7348a1895950b08039f72abadcede635886b0af1e9ee33a79a5bd5af7123542e
+ size 74241909
data/train-00021-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:198cca049bb959dd668920ce9ac3b14e68ba9537da9824dd49b52fcbe4f04a30
+ size 177583097
data/train-00022-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9d5333d8d0132bc3cf7662f4c491c4ab00b29e1562c04f444b3e8fb01478aeef
+ size 11929083
data/train-00023-of-00024.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cac81336b130e9265f11f508825e7c9f16c247f9bb09e2893a16d78b04490c7b
+ size 97778313
data/validation-00000-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5bd4cfb854ec4c22b98583fa4b6b16f5e6f7461d8bfc13cfe8e7eb12b30871d0
+ size 9995351
data/validation-00001-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2f0728b3f819c1ccf67a1d0e1c37bfe8523063a46a5742275770d3db2c2ed799
+ size 24882497
data/validation-00002-of-00003.parquet ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:3f6da16ec6688f788947023d84408bb7af71de7fd379d86b4ea517c42297d39b
+ size 67995850
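Each of the files above is a Git LFS pointer (version line, SHA-256 object id, and size in bytes); the actual Parquet shards live in LFS storage. Because the shards are plain Parquet, they can also be read outside of `datasets`, for example with pandas or Dask over the Hub's fsspec integration. A sketch, assuming the repo id `narrativeqa`, the default `data/` layout shown above, and a `huggingface_hub` version recent enough to register the `hf://` filesystem:

```python
import pandas as pd
import dask.dataframe as dd

# Read one validation shard with pandas (path is an assumption based on the
# file names added in this PR; requires huggingface_hub for the hf:// protocol).
df = pd.read_parquet("hf://datasets/narrativeqa/data/validation-00000-of-00003.parquet")
print(df.shape)

# Or read all training shards lazily with Dask.
ddf = dd.read_parquet("hf://datasets/narrativeqa/data/train-*.parquet")
print(ddf.npartitions)
```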
narrativeqa.py DELETED
@@ -1,174 +0,0 @@
- # coding=utf-8
- # Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
- #
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
- #
- #     http://www.apache.org/licenses/LICENSE-2.0
- #
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
- """NarrativeQA Reading Comprehension Challenge"""
-
-
- import csv
- import os
-
- import datasets
-
-
- _CITATION = """\
- @article{kocisky-etal-2018-narrativeqa,
-     title = "The {N}arrative{QA} Reading Comprehension Challenge",
-     author = "Ko{\v{c}}isk{\'y}, Tom{\'a}{\v{s}} and
-       Schwarz, Jonathan and
-       Blunsom, Phil and
-       Dyer, Chris and
-       Hermann, Karl Moritz and
-       Melis, G{\'a}bor and
-       Grefenstette, Edward",
-     editor = "Lee, Lillian and
-       Johnson, Mark and
-       Toutanova, Kristina and
-       Roark, Brian",
-     journal = "Transactions of the Association for Computational Linguistics",
-     volume = "6",
-     year = "2018",
-     address = "Cambridge, MA",
-     publisher = "MIT Press",
-     url = "https://aclanthology.org/Q18-1023",
-     doi = "10.1162/tacl_a_00023",
-     pages = "317--328",
-     abstract = "Reading comprehension (RC){---}in contrast to information retrieval{---}requires integrating information and reasoning about events, entities, and their relations across a full document. Question answering is conventionally used to assess RC ability, in both artificial agents and children learning to read. However, existing RC datasets and tasks are dominated by questions that can be solved by selecting answers using superficial information (e.g., local context similarity or global term frequency); they thus fail to test for the essential integrative aspect of RC. To encourage progress on deeper comprehension of language, we present a new dataset and set of tasks in which the reader must answer questions about stories by reading entire books or movie scripts. These tasks are designed so that successfully answering their questions requires understanding the underlying narrative rather than relying on shallow pattern matching or salience. We show that although humans solve the tasks easily, standard RC models struggle on the tasks presented here. We provide an analysis of the dataset and the challenges it presents.",
- }
- """
-
- _DESCRIPTION = """\
- The NarrativeQA dataset for question answering on long documents (movie scripts, books). It includes the list of documents with Wikipedia summaries, links to full stories, and questions and answers.
- """
-
- # Source:
- # - full_text: https://storage.googleapis.com/huggingface-nlp/datasets/narrative_qa/narrativeqa_full_text.zip
- # - repo: https://github.com/deepmind/narrativeqa/archive/master.zip
- _URLS = {
-     "full_text": "data/narrativeqa_full_text.zip",
-     "repo": "data/narrativeqa-master.zip",
- }
-
-
- class NarrativeQa(datasets.GeneratorBasedBuilder):
-     """NarrativeQA: Question answering on long-documents"""
-
-     def _info(self):
-         return datasets.DatasetInfo(
-             description=_DESCRIPTION,
-             citation=_CITATION,
-             features=datasets.Features(
-                 {
-                     "document": {
-                         "id": datasets.Value("string"),
-                         "kind": datasets.Value("string"),
-                         "url": datasets.Value("string"),
-                         "file_size": datasets.Value("int32"),
-                         "word_count": datasets.Value("int32"),
-                         "start": datasets.Value("string"),
-                         "end": datasets.Value("string"),
-                         "summary": {
-                             "text": datasets.Value("string"),
-                             "tokens": datasets.features.Sequence(datasets.Value("string")),
-                             "url": datasets.Value("string"),
-                             "title": datasets.Value("string"),
-                         },
-                         "text": datasets.Value("string"),
-                     },
-                     "question": {
-                         "text": datasets.Value("string"),
-                         "tokens": datasets.features.Sequence(datasets.Value("string")),
-                     },
-                     "answers": [
-                         {
-                             "text": datasets.Value("string"),
-                             "tokens": datasets.features.Sequence(datasets.Value("string")),
-                         }
-                     ],
-                 }
-             ),
-             homepage="https://github.com/deepmind/narrativeqa",
-         )
-
-     def _split_generators(self, dl_manager):
-         """Returns SplitGenerators."""
-
-         dl_dir = dl_manager.download_and_extract(_URLS)
-         dl_dir["repo"] = os.path.join(dl_dir["repo"], "narrativeqa-master")
-
-         return [
-             datasets.SplitGenerator(
-                 name=datasets.Split.TRAIN,
-                 gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "train"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.TEST,
-                 gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "test"},
-             ),
-             datasets.SplitGenerator(
-                 name=datasets.Split.VALIDATION,
-                 gen_kwargs={"repo_dir": dl_dir["repo"], "full_text_dir": dl_dir["full_text"], "split": "valid"},
-             ),
-         ]
-
-     def _generate_examples(self, repo_dir, full_text_dir, split):
-         """Yields examples."""
-         documents = {}
-         with open(os.path.join(repo_dir, "documents.csv"), encoding="utf-8") as f:
-             reader = csv.DictReader(f)
-             for row in reader:
-                 if row["set"] != split:
-                     continue
-                 documents[row["document_id"]] = row
-
-         summaries = {}
-         with open(os.path.join(repo_dir, "third_party", "wikipedia", "summaries.csv"), encoding="utf-8") as f:
-             reader = csv.DictReader(f)
-             for row in reader:
-                 if row["set"] != split:
-                     continue
-                 summaries[row["document_id"]] = row
-
-         with open(os.path.join(repo_dir, "qaps.csv"), encoding="utf-8") as f:
-             reader = csv.DictReader(f)
-             for id_, row in enumerate(reader):
-                 if row["set"] != split:
-                     continue
-                 document_id = row["document_id"]
-                 document = documents[document_id]
-                 summary = summaries[document_id]
-                 full_text = open(os.path.join(full_text_dir, document_id + ".content"), encoding="latin-1").read()
-                 res = {
-                     "document": {
-                         "id": document["document_id"],
-                         "kind": document["kind"],
-                         "url": document["story_url"],
-                         "file_size": document["story_file_size"],
-                         "word_count": document["story_word_count"],
-                         "start": document["story_start"],
-                         "end": document["story_end"],
-                         "summary": {
-                             "text": summary["summary"],
-                             "tokens": summary["summary_tokenized"].split(),
-                             "url": document["wiki_url"],
-                             "title": document["wiki_title"],
-                         },
-                         "text": full_text,
-                     },
-                     "question": {"text": row["question"], "tokens": row["question_tokenized"].split()},
-                     "answers": [
-                         {"text": row["answer1"], "tokens": row["answer1_tokenized"].split()},
-                         {"text": row["answer2"], "tokens": row["answer2_tokenized"].split()},
-                     ],
-                 }
-                 yield id_, res
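For reference, the deleted script assembled nested `document`/`question`/`answers` records from the raw CSVs and the full-text archive; the Parquet shards added in this PR keep that schema, so downstream code indexing into those nested fields should continue to work. A small sketch of the expected access pattern (repo id assumed to be `narrativeqa`, as above):

```python
from datasets import load_dataset

# Repo id is an assumption; the nested field names follow the features the
# deleted narrativeqa.py declared (document, question, answers).
ds = load_dataset("narrativeqa", split="validation")
example = ds[0]

print(example["document"]["summary"]["title"])
print(example["question"]["text"])
print([answer["text"] for answer in example["answers"]])
```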