Modalities: Text
Formats: parquet
Languages: English
Libraries: Datasets, pandas
system (HF staff) committed
Commit be9e317
0 Parent(s):

Update files from the datasets library (from 1.0.0)

Release notes: https://github.com/huggingface/datasets/releases/tag/1.0.0

.gitattributes ADDED
@@ -0,0 +1,27 @@
+ *.7z filter=lfs diff=lfs merge=lfs -text
+ *.arrow filter=lfs diff=lfs merge=lfs -text
+ *.bin filter=lfs diff=lfs merge=lfs -text
+ *.bin.* filter=lfs diff=lfs merge=lfs -text
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
+ *.ftz filter=lfs diff=lfs merge=lfs -text
+ *.gz filter=lfs diff=lfs merge=lfs -text
+ *.h5 filter=lfs diff=lfs merge=lfs -text
+ *.joblib filter=lfs diff=lfs merge=lfs -text
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
+ *.model filter=lfs diff=lfs merge=lfs -text
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
+ *.onnx filter=lfs diff=lfs merge=lfs -text
+ *.ot filter=lfs diff=lfs merge=lfs -text
+ *.parquet filter=lfs diff=lfs merge=lfs -text
+ *.pb filter=lfs diff=lfs merge=lfs -text
+ *.pt filter=lfs diff=lfs merge=lfs -text
+ *.pth filter=lfs diff=lfs merge=lfs -text
+ *.rar filter=lfs diff=lfs merge=lfs -text
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
+ *.tflite filter=lfs diff=lfs merge=lfs -text
+ *.tgz filter=lfs diff=lfs merge=lfs -text
+ *.xz filter=lfs diff=lfs merge=lfs -text
+ *.zip filter=lfs diff=lfs merge=lfs -text
+ *.zstandard filter=lfs diff=lfs merge=lfs -text
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
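
For orientation, the patterns above tell Git LFS which binary artifacts to store as pointer files rather than regular blobs. The snippet below is an illustrative sketch, not part of the commit: it approximates the routing with Python's fnmatch, which is not a full gitattributes matcher (for example, "**" is not handled specially), and the filenames it tests are made up.

    # Illustrative only: approximate which files the LFS patterns above would catch.
    from fnmatch import fnmatch

    LFS_PATTERNS = [
        "*.7z", "*.arrow", "*.bin", "*.bin.*", "*.bz2", "*.ftz", "*.gz", "*.h5",
        "*.joblib", "*.lfs.*", "*.model", "*.msgpack", "*.onnx", "*.ot",
        "*.parquet", "*.pb", "*.pt", "*.pth", "*.rar", "*.tar.*", "*.tflite",
        "*.tgz", "*.xz", "*.zip", "*.zstandard", "*tfevents*",
    ]

    for name in ["train-00000.parquet", "dummy_data.zip", "squad_v2.py"]:  # hypothetical filenames
        routed_to_lfs = any(fnmatch(name, pattern) for pattern in LFS_PATTERNS)
        print(name, "-> LFS" if routed_to_lfs else "-> regular git")
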
dataset_infos.json ADDED
@@ -0,0 +1 @@
+ {"squad_v2": {"description": "combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers\n to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but \n also determine when no answer is supported by the paragraph and abstain from answering.\n", "citation": "@article{2016arXiv160605250R,\n author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},\n Konstantin and {Liang}, Percy},\n title = \"{SQuAD: 100,000+ Questions for Machine Comprehension of Text}\",\n journal = {arXiv e-prints},\n year = 2016,\n eid = {arXiv:1606.05250},\n pages = {arXiv:1606.05250},\narchivePrefix = {arXiv},\n eprint = {1606.05250},\n}\n", "homepage": "https://rajpurkar.github.io/SQuAD-explorer/", "license": "", "features": {"id": {"dtype": "string", "id": null, "_type": "Value"}, "title": {"dtype": "string", "id": null, "_type": "Value"}, "context": {"dtype": "string", "id": null, "_type": "Value"}, "question": {"dtype": "string", "id": null, "_type": "Value"}, "answers": {"feature": {"text": {"dtype": "string", "id": null, "_type": "Value"}, "answer_start": {"dtype": "int32", "id": null, "_type": "Value"}}, "length": -1, "id": null, "_type": "Sequence"}}, "supervised_keys": null, "builder_name": "squad_v2", "config_name": "squad_v2", "version": {"version_str": "2.0.0", "description": null, "datasets_version_to_prepare": null, "major": 2, "minor": 0, "patch": 0}, "splits": {"train": {"name": "train", "num_bytes": 116851642, "num_examples": 130319, "dataset_name": "squad_v2"}, "validation": {"name": "validation", "num_bytes": 11677230, "num_examples": 11873, "dataset_name": "squad_v2"}}, "download_checksums": {"https://rajpurkar.github.io/SQuAD-explorer/dataset/train-v2.0.json": {"num_bytes": 42123633, "checksum": "68dcfbb971bd3e96d5b46c7177b16c1a4e7d4bdef19fb204502738552dede002"}, "https://rajpurkar.github.io/SQuAD-explorer/dataset/dev-v2.0.json": {"num_bytes": 4370528, "checksum": "80a5225e94905956a6446d296ca1093975c4d3b3260f1d6c8f68bc2ab77182d8"}}, "download_size": 46494161, "dataset_size": 128528872, "size_in_bytes": 175023033}}
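
The metadata above (features, split sizes, download checksums) is what the datasets library reports once the dataset is built. A minimal usage sketch, assuming the `datasets` package is installed and the download URLs listed in dataset_infos.json are reachable:

    from datasets import load_dataset

    ds = load_dataset("squad_v2")        # downloads and builds the train/validation splits
    print(ds)                            # split names and example counts (130319 / 11873)
    print(ds["train"].features)          # id, title, context, question, answers
    sample = ds["validation"][0]
    print(sample["question"], sample["answers"])  # "answers" lists are empty for unanswerable questions
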
dummy/1.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:60fcc35aebb5f739a33f9ca44c2b4f10d9e5d306e534ae04ee00e77e5180ae44
+ size 14198
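
Both dummy_data.zip entries are stored through Git LFS, so the repository only holds the version/oid/size pointer shown above rather than the archive itself. A throwaway sketch of reading that pointer format (real tooling should rely on `git lfs` itself):

    def parse_lfs_pointer(text: str) -> dict:
        """Split a Git LFS pointer file into its key/value fields."""
        fields = {}
        for line in text.strip().splitlines():
            key, _, value = line.partition(" ")
            fields[key] = value
        return fields

    pointer_text = (
        "version https://git-lfs.github.com/spec/v1\n"
        "oid sha256:60fcc35aebb5f739a33f9ca44c2b4f10d9e5d306e534ae04ee00e77e5180ae44\n"
        "size 14198\n"
    )
    print(parse_lfs_pointer(pointer_text))  # {'version': ..., 'oid': 'sha256:...', 'size': '14198'}
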
dummy/squad_v2/2.0.0/dummy_data.zip ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:97acfe5d1bbd17d36d655c38c4239b50f65025b2919df42cf727ad1ea6cab72b
+ size 3364
squad_v2.py ADDED
@@ -0,0 +1,127 @@
+ """TODO(squad_v2): Add a description here."""
+
+ from __future__ import absolute_import, division, print_function
+
+ import json
+ import os
+
+ import datasets
+
+
+ # TODO(squad_v2): BibTeX citation
+ _CITATION = """\
+ @article{2016arXiv160605250R,
+        author = {{Rajpurkar}, Pranav and {Zhang}, Jian and {Lopyrev},
+                  Konstantin and {Liang}, Percy},
+         title = "{SQuAD: 100,000+ Questions for Machine Comprehension of Text}",
+       journal = {arXiv e-prints},
+          year = 2016,
+           eid = {arXiv:1606.05250},
+         pages = {arXiv:1606.05250},
+ archivePrefix = {arXiv},
+        eprint = {1606.05250},
+ }
+ """
+
+ _DESCRIPTION = """\
+ combines the 100,000 questions in SQuAD1.1 with over 50,000 unanswerable questions written adversarially by crowdworkers
+  to look similar to answerable ones. To do well on SQuAD2.0, systems must not only answer questions when possible, but
+  also determine when no answer is supported by the paragraph and abstain from answering.
+ """
+
+ _URL = "https://rajpurkar.github.io/SQuAD-explorer/dataset/"
+ _DEV_FILE = "dev-v2.0.json"
+ _TRAINING_FILE = "train-v2.0.json"
+
+
+ class SquadV2Config(datasets.BuilderConfig):
+     """BuilderConfig for SQUAD."""
+
+     def __init__(self, **kwargs):
+         """BuilderConfig for SQUADV2.
+
+         Args:
+           **kwargs: keyword arguments forwarded to super.
+         """
+         super(SquadV2Config, self).__init__(**kwargs)
+
+
+ class SquadV2(datasets.GeneratorBasedBuilder):
+     """TODO(squad_v2): Short description of my dataset."""
+
+     # TODO(squad_v2): Set up version.
+     BUILDER_CONFIGS = [
+         SquadV2Config(name="squad_v2", version=datasets.Version("2.0.0"), description="SQuAD plain text version 2"),
+     ]
+
+     def _info(self):
+         # TODO(squad_v2): Specifies the datasets.DatasetInfo object
+         return datasets.DatasetInfo(
+             # This is the description that will appear on the datasets page.
+             description=_DESCRIPTION,
+             # datasets.features.FeatureConnectors
+             features=datasets.Features(
+                 {
+                     "id": datasets.Value("string"),
+                     "title": datasets.Value("string"),
+                     "context": datasets.Value("string"),
+                     "question": datasets.Value("string"),
+                     "answers": datasets.features.Sequence(
+                         {
+                             "text": datasets.Value("string"),
+                             "answer_start": datasets.Value("int32"),
+                         }
+                     ),
+                     # These are the features of your dataset like images, labels ...
+                 }
+             ),
+             # If there's a common (input, target) tuple from the features,
+             # specify them here. They'll be used if as_supervised=True in
+             # builder.as_dataset.
+             supervised_keys=None,
+             # Homepage of the dataset for documentation
+             homepage="https://rajpurkar.github.io/SQuAD-explorer/",
+             citation=_CITATION,
+         )
+
+     def _split_generators(self, dl_manager):
+         """Returns SplitGenerators."""
+         # TODO(squad_v2): Downloads the data and defines the splits
+         # dl_manager is a datasets.download.DownloadManager that can be used to
+         # download and extract URLs
+         urls_to_download = {"train": os.path.join(_URL, _TRAINING_FILE), "dev": os.path.join(_URL, _DEV_FILE)}
+         downloaded_files = dl_manager.download_and_extract(urls_to_download)
+
+         return [
+             datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepath": downloaded_files["train"]}),
+             datasets.SplitGenerator(name=datasets.Split.VALIDATION, gen_kwargs={"filepath": downloaded_files["dev"]}),
+         ]
+
+     def _generate_examples(self, filepath):
+         """Yields examples."""
+         # TODO(squad_v2): Yields (key, example) tuples from the dataset
+         with open(filepath, encoding="utf-8") as f:
+             squad = json.load(f)
+             for example in squad["data"]:
+                 title = example.get("title", "").strip()
+                 for paragraph in example["paragraphs"]:
+                     context = paragraph["context"].strip()
+                     for qa in paragraph["qas"]:
+                         question = qa["question"].strip()
+                         id_ = qa["id"]
+
+                         answer_starts = [answer["answer_start"] for answer in qa["answers"]]
+                         answers = [answer["text"].strip() for answer in qa["answers"]]
+
+                         # Features currently used are "context", "question", and "answers".
+                         # Others are extracted here for the ease of future expansions.
+                         yield id_, {
+                             "title": title,
+                             "context": context,
+                             "question": question,
+                             "id": id_,
+                             "answers": {
+                                 "answer_start": answer_starts,
+                                 "text": answers,
+                             },
+                         }
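
As a hedged usage sketch (not part of the commit), the loading script above can be exercised directly by pointing load_dataset at a local copy of the file, assuming a datasets version that still supports script-based loading; "./squad_v2.py" below is an assumed local path to the file added in this commit.

    from datasets import load_dataset

    ds = load_dataset("./squad_v2.py")   # runs _split_generators and then _generate_examples
    print(ds["train"][0]["question"])
    print(ds["validation"].num_rows)     # 11873, matching dataset_infos.json
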