import json

import datasets


_CITATION = """\
@article{tydiqa,
title = {TyDi QA: A Benchmark for Information-Seeking Question Answering in Typologically Diverse Languages},
author = {Jonathan H. Clark and Eunsol Choi and Michael Collins and Dan Garrette and Tom Kwiatkowski and Vitaly Nikolaev and Jennimaria Palomaki},
year = {2020},
journal = {Transactions of the Association for Computational Linguistics}
}
"""

_DESCRIPTION = """\
TyDi QA is a question answering dataset covering 11 typologically diverse languages with 204K question-answer pairs.
The languages of TyDi QA are diverse with regard to their typology -- the set of linguistic features that each language
expresses -- such that we expect models performing well on this set to generalize across a large number of the languages
in the world. It contains language phenomena that would not be found in English-only corpora. To provide a realistic
information-seeking task and avoid priming effects, questions are written by people who want to know the answer but
don't know the answer yet (unlike SQuAD and its descendants), and the data is collected directly in each language without
the use of translation (unlike MLQA and XQuAD).
"""

_LANG = ["arabic", "bengali", "english", "finnish", "indonesian", "japanese", "korean", "russian", "swahili", "telugu", "thai"]

_URL = "https://huggingface.co/datasets/khalidalt/tydiqa-goldp/resolve/main/primary_tasks/{split}/{language}-{split}.jsonl"
_VERSION = datasets.Version("1.1.0", "")


class tydiqa_Primary(datasets.GeneratorBasedBuilder):
    """TyDi QA primary-task builder, with one configuration per language in _LANG."""

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(
            name=lang,
            description=f"tydiqa-primary language {lang}",
            version=_VERSION,
        )
        for lang in _LANG
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "passage_answer_candidates": datasets.features.Sequence(
                        {
                            "plaintext_start_byte": datasets.Value("int32"),
                            "plaintext_end_byte": datasets.Value("int32"),
                        }
                    ),
                    "question_text": datasets.Value("string"),
                    "document_title": datasets.Value("string"),
                    "language": datasets.Value("string"),
                    "annotations": datasets.features.Sequence(
                        {
                            "passage_answer_candidate_index": datasets.Value("int32"),
                            "minimal_answers_start_byte": datasets.Value("int32"),
                            "minimal_answers_end_byte": datasets.Value("int32"),
                            "yes_no_answer": datasets.Value("string"),
                        }
                    ),
                    "document_plaintext": datasets.Value("string"),
                    "document_url": datasets.Value("string"),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/google-research-datasets/tydiqa",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators."""
        language = self.config.name
        splits = {datasets.Split.TRAIN: "train", datasets.Split.VALIDATION: "dev"}

        data_urls = {
            split: _URL.format(language=language, split=splits[split]) for split in splits
        }

        dl_paths = dl_manager.download(data_urls)
        return [
            datasets.SplitGenerator(
                name=split,
                gen_kwargs={"filepath": dl_paths[split]},
            )
            for split in splits
        ]

    def _generate_examples(self, filepath):
        """Yields examples."""
        with open(filepath, encoding="utf-8") as f:
            # Each line of the JSONL file holds one primary-task example.
            for id_, row in enumerate(f):
                data = json.loads(row)
                passages = data["passage_answer_candidates"]
                end_byte = [passage["plaintext_end_byte"] for passage in passages]
                start_byte = [passage["plaintext_start_byte"] for passage in passages]
                title = data["document_title"]
                lang = data["language"]
                question = data["question_text"]
                annotations = data["annotations"]

                yes_no_answers = [annotation["yes_no_answer"] for annotation in annotations]
                min_answers_end_byte = [
                    annotation["minimal_answer"]["plaintext_end_byte"] for annotation in annotations
                ]
                min_answers_start_byte = [
                    annotation["minimal_answer"]["plaintext_start_byte"] for annotation in annotations
                ]
                passage_cand_answers = [
                    annotation["passage_answer"]["candidate_index"] for annotation in annotations
                ]
                doc = data["document_plaintext"]
                url = data["document_url"]
                yield id_, {
                    "passage_answer_candidates": {
                        "plaintext_start_byte": start_byte,
                        "plaintext_end_byte": end_byte,
                    },
                    "question_text": question,
                    "document_title": title,
                    "language": lang,
                    "annotations": {
                        "passage_answer_candidate_index": passage_cand_answers,
                        "minimal_answers_start_byte": min_answers_start_byte,
                        "minimal_answers_end_byte": min_answers_end_byte,
                        "yes_no_answer": yes_no_answers,
                    },
                    "document_plaintext": doc,
                    "document_url": url,
                }
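

# A minimal usage sketch (an assumption, not part of the original builder): each
# language in _LANG is exposed as its own configuration, so the dataset can be
# loaded by pointing datasets.load_dataset at this script. Depending on the
# installed datasets version, trust_remote_code=True may also be required.
if __name__ == "__main__":
    english = datasets.load_dataset(__file__, "english")
    print(english["train"][0]["question_text"])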