Datasets:
Tasks:
Question Answering
Sub-tasks:
extractive-qa
Languages:
English
Size:
100K<n<1M
ArXiv:
1704.05179
License:
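How to use it, as a minimal sketch (this assumes the dataset is published on the Hugging Face Hub under the id "search_qa"; otherwise point load_dataset at a local copy of the script below):

    from datasets import load_dataset

    # "train_test_val" exposes train/test/validation splits;
    # "raw_jeopardy" puts every example into a single train split.
    search_qa = load_dataset("search_qa", "train_test_val")
    print(search_qa["train"][0]["question"])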
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors and the HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Lint as: python3
"""SearchQA dataset."""

import itertools
import json

import datasets

_CITATION = r"""
@article{DBLP:journals/corr/DunnSHGCC17,
  author    = {Matthew Dunn and
               Levent Sagun and
               Mike Higgins and
               V. Ugur G{\"{u}}ney and
               Volkan Cirik and
               Kyunghyun Cho},
  title     = {SearchQA: {A} New Q{\&}A Dataset Augmented with Context from a
               Search Engine},
  journal   = {CoRR},
  volume    = {abs/1704.05179},
  year      = {2017},
  url       = {http://arxiv.org/abs/1704.05179},
  archivePrefix = {arXiv},
  eprint    = {1704.05179},
  timestamp = {Mon, 13 Aug 2018 16:47:09 +0200},
  biburl    = {https://dblp.org/rec/journals/corr/DunnSHGCC17.bib},
  bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""

# pylint: disable=line-too-long
_DESCRIPTION = """
We publicly release a new large-scale dataset, called SearchQA, for machine comprehension, or question-answering. Unlike recently released datasets, such as DeepMind
CNN/DailyMail and SQuAD, the proposed SearchQA was constructed to reflect a full pipeline of general question-answering. That is, we start not from an existing article
and generate a question-answer pair, but start from an existing question-answer pair, crawled from J! Archive, and augment it with text snippets retrieved by Google.
Following this approach, we built SearchQA, which consists of more than 140k question-answer pairs with each pair having 49.6 snippets on average. Each question-answer-context
tuple of the SearchQA comes with additional meta-data such as the snippet's URL, which we believe will be valuable resources for future research. We conduct human evaluation
as well as test two baseline methods, one simple word selection and the other deep learning based, on the SearchQA. We show that there is a meaningful gap between the human
and machine performances. This suggests that the proposed dataset could well serve as a benchmark for question-answering.
"""
_DL_URLS = {
    "raw_jeopardy": [
        "data/raw_jeopardy/000000-029999.zip",
        "data/raw_jeopardy/030000-49999.zip",
        "data/raw_jeopardy/050000-059999.zip",
        "data/raw_jeopardy/060000-089999.zip",
        "data/raw_jeopardy/090000-119999.zip",
        "data/raw_jeopardy/120000-149999.zip",
        "data/raw_jeopardy/150000-179999.zip",
        "data/raw_jeopardy/180000-216929.zip",
    ],
    "train_test_val": {
        "train": "data/train_test_val/train.zip",
        "test": "data/train_test_val/test.zip",
        "validation": "data/train_test_val/val.zip",
    },
}
# pylint: enable=line-too-long


class SearchQaConfig(datasets.BuilderConfig):
    """BuilderConfig for SearchQA."""

    def __init__(self, data_url, **kwargs):
        """BuilderConfig for SearchQA.

        Args:
            data_url: archive path(s) for this configuration (a list for
                "raw_jeopardy", a dict keyed by split for "train_test_val").
            **kwargs: keyword arguments forwarded to super.
        """
        super(SearchQaConfig, self).__init__(version=datasets.Version("1.0.0", ""), **kwargs)
        self.data_url = data_url


class SearchQa(datasets.GeneratorBasedBuilder):
    """SearchQA dataset."""

    BUILDER_CONFIGS = [SearchQaConfig(name=name, description="", data_url=_DL_URLS[name]) for name in _DL_URLS.keys()]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION + "\n" + self.config.description,
            features=datasets.Features(
                {
                    "category": datasets.Value("string"),
                    "air_date": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "value": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "round": datasets.Value("string"),
                    "show_number": datasets.Value("int32"),
                    "search_results": datasets.features.Sequence(
                        {
                            "urls": datasets.Value("string"),
                            "snippets": datasets.Value("string"),
                            "titles": datasets.Value("string"),
                            "related_links": datasets.Value("string"),
                        }
                    ),
                }
            ),
            homepage="https://github.com/nyu-dl/dl4ir-searchQA",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        data_dirs = dl_manager.download_and_extract(_DL_URLS[self.config.name])

        if self.config.name == "raw_jeopardy":
            filepaths = itertools.chain.from_iterable(dl_manager.iter_files(data_dir) for data_dir in data_dirs)
            return [
                datasets.SplitGenerator(name=datasets.Split.TRAIN, gen_kwargs={"filepaths": filepaths}),
            ]
        elif self.config.name == "train_test_val":
            return [
                datasets.SplitGenerator(name=split, gen_kwargs={"filepaths": dl_manager.iter_files(data_dirs[split])})
                for split in (datasets.Split.TRAIN, datasets.Split.TEST, datasets.Split.VALIDATION)
            ]

    def _generate_examples(self, filepaths):
        """Yields examples."""
        for i, filepath in enumerate(filepaths):
            with open(filepath, encoding="utf-8") as f:
                data = json.load(f)

            category = data["category"]
            air_date = data["air_date"]
            question = data["question"]
            value = data["value"]
            answer = data["answer"]
            round_ = data["round"]
            show_number = int(data["show_number"])
            search_results = data["search_results"]

            urls = [result["url"] for result in search_results]
            snippets = [result["snippet"] for result in search_results]
            titles = [result["title"] for result in search_results]
            related_links = [
                result["related_links"] if result["related_links"] else "" for result in search_results
            ]

            yield i, {
                "category": category,
                "air_date": air_date,
                "question": question,
                "value": value,
                "answer": answer,
                "round": round_,
                "show_number": show_number,
                "search_results": {
                    "urls": urls,
                    "snippets": snippets,
                    "titles": titles,
                    "related_links": related_links,
                },
            }