import json

import datasets
from datasets.features import Sequence

_BASE_URL = "https://huggingface.co/datasets/bavard/personachat_truecased/raw/main"
_URLS = {
    "full": {
        "train": _BASE_URL + "/persona_chat_truecased_full_train.json",
        "valid": _BASE_URL + "/persona_chat_truecased_full_valid.json",
    },
    "sample": {
        "train": _BASE_URL + "/persona_chat_truecased_sample_train.json",
        "valid": _BASE_URL + "/persona_chat_truecased_sample_valid.json",
    },
}

_DESCRIPTION = """\
A version of the PersonaChat dataset that has been true-cased and given normalized punctuation.
The original PersonaChat dataset is in all lower case and has extra spaces around each clause- or
sentence-separating punctuation mark. This version of the dataset reads more like natural language,
with sentence capitalization, proper-noun capitalization, and normalized whitespace. In addition,
each dialogue turn includes a pool of distractor candidate responses, which can be used by a
multiple choice regularization loss during training.
"""

_CITATION = """\
@article{zhang2018personalizing,
  title={Personalizing dialogue agents: I have a dog, do you have pets too?},
  author={Zhang, Saizheng and Dinan, Emily and Urbanek, Jack and Szlam, Arthur and Kiela, Douwe and Weston, Jason},
  journal={arXiv preprint arXiv:1801.07243},
  year={2018}
}
"""


class PersonachatTruecased(datasets.GeneratorBasedBuilder):
    """
    Version of the PersonaChat dataset that includes true-casing, normalized punctuation, and
    distractor candidate responses for each dialogue turn, enabling a multiple choice
    regularization loss during training.
    """

    VERSION = datasets.Version("1.0.0")

    BUILDER_CONFIGS = [
        datasets.BuilderConfig(name="full", version=VERSION, description="The full dataset."),
        datasets.BuilderConfig(name="sample", version=VERSION, description="A small sample of the dataset, useful for testing."),
    ]

    DEFAULT_CONFIG_NAME = "full"

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features({
                "personality": Sequence(datasets.Value("string")),
                "candidates": Sequence(datasets.Value("string")),
                "history": Sequence(datasets.Value("string")),
                "conv_id": datasets.Value("int32"),
                "utterance_idx": datasets.Value("int32"),
            }),
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager: datasets.DownloadManager):
        # `dl_manager.download` mirrors the structure of its argument, so this returns a dict
        # with "train" and "valid" keys mapping to local file paths.
        split_paths = dl_manager.download(_URLS[self.config.name])
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                # These kwargs will be passed to _generate_examples.
                gen_kwargs={"data_path": split_paths["train"]},
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={"data_path": split_paths["valid"]},
            ),
        ]

    def _generate_examples(self, data_path: str):
        with open(data_path, encoding="utf-8") as f:
            data = json.load(f)
        for conv_id, conv in enumerate(data):
            personality = conv["personality"]
            # Emit one example per dialogue turn, keyed by conversation index and turn index.
            for utterance_idx, utterance in enumerate(conv["utterances"]):
                id_ = f"{conv_id}-{utterance_idx}"
                yield id_, {
                    "personality": personality,
                    "candidates": utterance["candidates"],
                    "history": utterance["history"],
                    "conv_id": conv_id,
                    "utterance_idx": utterance_idx,
                }
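
# A minimal usage sketch, assuming this script backs the `bavard/personachat_truecased` dataset
# on the Hugging Face Hub (per _BASE_URL above); `datasets.load_dataset` also accepts a local
# path to this file. The "sample" config keeps the download small.
if __name__ == "__main__":
    sample = datasets.load_dataset("bavard/personachat_truecased", "sample", split="train")
    print(sample[0]["personality"])
    print(sample[0]["history"])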