"""LUDWIG, (Language Understanding With Implied meaninG). The conversational implicature dataset.""" |
|
|
|
|
|
from typing import Dict, Union |
|
import numpy as np |
|
import copy |
|
import csv |
|
import os |
|
|
|
import datasets |
|
|
|
|
|
logger = datasets.logging.get_logger(__name__) |
|
|
|
|
|
_CITATION = """\ |
|
TBC |
|
""" |
|
|
|
_DESCRIPTION = """\ |
|
TODO |
|
""" |
|
|
|
_URL = "https://raw.githubusercontent.com/ucl-dark/ludwig/main/" |
|
_URLS = { |
|
"dev": _URL + "dev_conversational_implicatures.csv", |
|
"test": _URL + "test_conversational_implicatures.csv" |
|
} |
|
|
|
|
|
class LudwigConfig(datasets.BuilderConfig):
    """BuilderConfig for LUDWIG."""

    def __init__(self, k: int, seed: int, **kwargs):
        """BuilderConfig for LUDWIG.

        Args:
          k: number of dev examples sampled as few-shot prompts for each test example.
          seed: seed for the random number generator used to sample the prompts.
          **kwargs: keyword arguments forwarded to super.
        """
        super().__init__(**kwargs)
        self.k = k
        self.seed = seed
        self.rng = np.random.default_rng(seed)

    def __eq__(self, other):
        return self.k == other.k and self.seed == other.seed

    def reset_rng(self):
        """Re-seeds the rng so prompt sampling is reproducible across splits and runs."""
        self.rng = np.random.default_rng(self.seed)


class Ludwig(datasets.GeneratorBasedBuilder):
    """LUDWIG: Conversational implicatures dataset."""

    BUILDER_CONFIGS = [
        LudwigConfig(
            name=f"{k}-shot",
            version=datasets.Version("1.0.0", ""),
            description="Plain text",
            k=k,
            seed=0,
        )
        for k in (0, 1, 5, 10, 15, 30)
    ]

    def _info(self):
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "id": datasets.Value("string"),
                    "utterance": datasets.Value("string"),
                    "response": datasets.Value("string"),
                    "implicature": datasets.Value("string"),
                    "incoherent_implicature": datasets.Value("string"),
                    "prompts": datasets.features.Sequence(
                        {
                            "utterance": datasets.Value("string"),
                            "response": datasets.Value("string"),
                            "implicature": datasets.Value("string"),
                            "incoherent_implicature": datasets.Value("string"),
                        }
                    ),
                }
            ),
            supervised_keys=None,
            homepage="https://github.com/ucl-dark/ludwig",
            citation=_CITATION,
        )

    def _split_generators(self, dl_manager):
        downloaded_files = dl_manager.download_and_extract(_URLS)

        return [
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "filepath": downloaded_files["dev"],
                    "dev_filepath": downloaded_files["dev"],
                    "k": self.config.k,
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "filepath": downloaded_files["test"],
                    "dev_filepath": downloaded_files["dev"],
                    "k": self.config.k,
                },
            ),
        ]

    @staticmethod
    def _process_text(text):
        """Strips leading and trailing newlines from a raw csv field."""
        return text.strip("\n")

    def _filter_examples(
        self, input_line: Dict[str, str],
    ) -> Union[None, Dict[str, str]]:
        """
        Takes an input_line from the csv file and filters out all examples
        where the implicature is not a simple yes or no.
        :param input_line: a line read from a csv file with data
        :return: a dict with the utterance, response, and binary implicature,
            or None if the example should be discarded
        """
        if not input_line:
            return None
        # Keep only examples whose implicature starts with a plain "yes" or "no".
        if "yes" in input_line["Implicature"].lower()[:5]:
            implicature = "yes"
        elif "no" in input_line["Implicature"].lower()[:4]:
            implicature = "no"
        else:
            return None
        response = self._process_text(input_line["Response utterance"])
        example = {
            "utterance": self._process_text(input_line["Context utterance"]),
            "response": response,
            "implicature": implicature,
        }
        return example

    def get_negative_binary_example(self, example):
        """
        Creates a false example for a binary implicature example.
        :param example: a dict with a binary ("yes"/"no") implicature
        :return: the same dict as the input except that the implicature is negated (yes to no and vice-versa)
        """
        if example["implicature"] == "yes":
            false_implicature = "no"
        elif example["implicature"] == "no":
            false_implicature = "yes"
        else:
            raise ValueError("Unknown implicature %s" % example["implicature"])
        false_example = copy.deepcopy(example)
        false_example["implicature"] = false_implicature
        return false_example

    def read_data_csv(
        self,
        test_input_data_path: str,
        dev_input_data_path: str,
    ):
        """Reads the test and dev csv files and returns the filtered examples per split."""
        assert os.path.exists(
            test_input_data_path
        ), "No input data file found at: %s\n" "Current working directory: %s" % (
            test_input_data_path,
            os.getcwd(),
        )
        assert os.path.exists(
            dev_input_data_path
        ), "No dev input data file found at: %s\n" "Current working directory: %s" % (
            dev_input_data_path,
            os.getcwd(),
        )
        all_data = {
            "test_data": [],
            "dev_data": [],
        }
        with open(test_input_data_path, newline="") as csvfile:
            with open(dev_input_data_path, newline="") as dev_csvfile:
                reader = csv.DictReader(csvfile)
                dev_reader = csv.DictReader(dev_csvfile)
                for row in reader:
                    example = self._filter_examples(row)
                    if example is not None:
                        # The incoherent implicature is the negated label of the coherent one.
                        negative_implicature = self.get_negative_binary_example(example)["implicature"]
                        example = {**example, "incoherent_implicature": negative_implicature}
                        all_data["test_data"].append(example)
                for row in dev_reader:
                    example = self._filter_examples(row)
                    if example is not None:
                        negative_implicature = self.get_negative_binary_example(example)["implicature"]
                        example = {**example, "incoherent_implicature": negative_implicature}
                        all_data["dev_data"].append(example)
        return all_data

    def _get_prompt_examples(self, dev_data, k_shot=0):
        """
        Samples k_shot few-shot prompt examples from the dev data.
        :param dev_data: list of examples to sample from
        :param k_shot: how many prompt examples to sample (without replacement)
        :return: a list of k_shot examples (empty if k_shot is 0)
        """
        if k_shot > 0:
            prompt_indices = self.config.rng.choice(
                range(len(dev_data)), k_shot, replace=False
            )
            prompt_examples = [dev_data[j] for j in prompt_indices]
        else:
            prompt_examples = []
        return prompt_examples

    def _generate_examples(self, filepath, dev_filepath, k: int):
        """This function returns the examples in the raw (text) form."""
        logger.info("generating examples from = %s", filepath)
        logger.info("k-shot examples from = %s", dev_filepath)
        all_data = self.read_data_csv(filepath, dev_filepath)
        # Reset the rng so that the k-shot prompts are sampled identically on every run.
        self.config.reset_rng()
        for i, example in enumerate(all_data["test_data"]):
            prompt_examples = self._get_prompt_examples(all_data["dev_data"], k)
            yield i, {
                **example,
                "prompts": prompt_examples,
                # The "id" feature is declared as a string in _info, so cast it here.
                "id": str(i + 1),
            }
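

if __name__ == "__main__":
    # Minimal usage sketch, not part of the original loading script: it assumes this
    # file is saved locally (e.g. as "ludwig.py") and loads it directly with
    # `datasets.load_dataset`, picking the 5-shot configuration. Depending on the
    # installed version of `datasets`, loading a local script may additionally
    # require passing `trust_remote_code=True`.
    ludwig = datasets.load_dataset(__file__, name="5-shot")
    print(ludwig["validation"][0])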