# MoreHopQA dataset loading script for the HuggingFace `datasets` library.
import datasets
from datasets import load_dataset
import json
import os
class MoreHopQAConfig(datasets.BuilderConfig):
    """Configuration for the MoreHopQA dataset builder.

    Carries the repository-relative path of the JSON file backing a
    particular configuration (verified / unverified split of the data).
    """

    def __init__(self, data_path, **kwargs):
        """Create a MoreHopQA configuration.

        Args:
            data_path: string, repository-relative path to the JSON file
                containing the dataset examples.
            **kwargs: forwarded verbatim to ``datasets.BuilderConfig``
                (e.g. ``name``, ``version``, ``description``).
        """
        super().__init__(**kwargs)
        self.data_path = data_path
class MoreHopQA(datasets.GeneratorBasedBuilder):
    """MoreHopQA: A dataset for multi-hop question answering."""

    BUILDER_CONFIG_CLASS = MoreHopQAConfig
    BUILDER_CONFIGS = [
        MoreHopQAConfig(
            name="verified",
            version=datasets.Version("1.0.0", ""),
            description="MoreHopQA: A dataset for multi-hop question answering.",
            data_path="data/with_human_verification.json",
        ),
        MoreHopQAConfig(
            name="unverified",
            version=datasets.Version("1.0.0", ""),
            description="MoreHopQA: A dataset for multi-hop question answering.",
            data_path="data/without_human_verification.json",
        ),
    ]
    DEFAULT_CONFIG_NAME = "verified"

    def _info(self):
        """Declare the feature schema shared by both configurations."""
        return datasets.DatasetInfo(
            features=datasets.Features({
                "question": datasets.Value("string"),
                "context": datasets.Sequence({
                    "title": datasets.Value("string"),
                    "paragraphs": datasets.Sequence(datasets.Value("string"))
                }),
                "answer": datasets.Value("string"),
                "previous_question": datasets.Value("string"),
                "previous_answer": datasets.Value("string"),
                "question_decomposition": datasets.Sequence({
                    "sub_id": datasets.Value("string"),
                    "question": datasets.Value("string"),
                    "answer": datasets.Value("string"),
                    "paragraph_support_title": datasets.Value("string")
                }),
                "question_on_last_hop": datasets.Value("string"),
                "answer_type": datasets.Value("string"),
                "previous_answer_type": datasets.Value("string"),
                "no_of_hops": datasets.Value("int32"),
                "reasoning_type": datasets.Value("string"),
            }),
        )

    def _split_generators(self, dl_manager):
        """Returns SplitGenerators.

        The dataset ships a single TEST split whose source file is chosen
        by the active configuration (verified / unverified).
        """
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                # data_path is already a complete relative path; no join needed.
                gen_kwargs={"filepath": self.config.data_path}
            )
        ]

    def _generate_examples(self, filepath):
        """Yields (example_id, example_dict) pairs read from the JSON file.

        Args:
            filepath: path to a JSON file containing a list of example
                records keyed as produced by the MoreHopQA release.
        """
        with open(filepath, encoding="utf-8") as f:
            data = json.load(f)
        for item in data:
            yield item['_id'], {
                "question": item["question"],
                # BUG FIX: the key must be "paragraphs" to match the feature
                # schema declared in _info(); the original emitted "content",
                # which makes `datasets` reject every generated example.
                "context": [{
                    "title": subitem[0],  # title is the first item in the sublist
                    "paragraphs": subitem[1]  # paragraphs are the second item
                } for subitem in item.get("context", [])],
                "answer": item["answer"],
                "previous_question": item["previous_question"],
                "previous_answer": item["previous_answer"],
                "question_decomposition": [{
                    "sub_id": subitem["sub_id"],
                    "question": subitem["question"],
                    "answer": subitem["answer"],
                    "paragraph_support_title": subitem["paragraph_support_title"]
                } for subitem in item["question_decomposition"]],
                # Source JSON uses the abbreviated key "ques_on_last_hop".
                "question_on_last_hop": item["ques_on_last_hop"],
                "answer_type": item["answer_type"],
                "previous_answer_type": item["previous_answer_type"],
                "no_of_hops": item["no_of_hops"],
                "reasoning_type": item["reasoning_type"],
            }