"""The SuperGLUE benchmark.""" |
|
|
|
import json |
|
import os |
|
|
|
import datasets |
|
|
|
_CITATION = """\
"""

_DESCRIPTION = """\
"""

_HOMEPAGE = "https://huggingface.co/datasets/KBLab/overlim"

_LICENSE = ""

_GLUE_CITATION = """\
@inproceedings{wang2019glue,
  title={{GLUE}: A Multi-Task Benchmark and Analysis Platform for Natural Language Understanding},
  author={Wang, Alex and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R.},
  note={In the Proceedings of ICLR.},
  year={2019}
}
"""

_GLUE_DESCRIPTION = """\
GLUE, the General Language Understanding Evaluation benchmark
(https://gluebenchmark.com/) is a collection of resources for training,
evaluating, and analyzing natural language understanding systems.

"""

_SST_DESCRIPTION = """\
The Stanford Sentiment Treebank consists of sentences from movie reviews and
human annotations of their sentiment. The task is to predict the sentiment of a
given sentence. We use the two-way (positive/negative) class split, and use only
sentence-level labels."""

_SST_CITATION = """\
@inproceedings{socher2013recursive,
  title={Recursive deep models for semantic compositionality over a sentiment treebank},
  author={Socher, Richard and Perelygin, Alex and Wu, Jean and Chuang, Jason and Manning, Christopher D and Ng, Andrew and Potts, Christopher},
  booktitle={Proceedings of the 2013 conference on empirical methods in natural language processing},
  pages={1631--1642},
  year={2013}
}"""

_MRPC_DESCRIPTION = """\
The Microsoft Research Paraphrase Corpus (Dolan & Brockett, 2005) is a corpus of
sentence pairs automatically extracted from online news sources, with human annotations
for whether the sentences in the pair are semantically equivalent."""

_MRPC_CITATION = """\
@inproceedings{dolan2005automatically,
  title={Automatically constructing a corpus of sentential paraphrases},
  author={Dolan, William B and Brockett, Chris},
  booktitle={Proceedings of the Third International Workshop on Paraphrasing (IWP2005)},
  year={2005}
}"""

_QQP_DESCRIPTION = """\
The Quora Question Pairs (QQP) dataset is a collection of question pairs from the
community question-answering website Quora. The task is to determine whether a
pair of questions are semantically equivalent."""

_QQP_CITATION = """\
@online{iyer2017first,
  author = {Iyer, Shankar and Dandekar, Nikhil and Csernai, Kornel},
  title = {First Quora Dataset Release: Question Pairs},
  year = {2017},
  url = {https://data.quora.com/First-Quora-Dataset-Release-Question-Pairs},
  urldate = {2019-04-03}
}"""

_STSB_DESCRIPTION = """\
The Semantic Textual Similarity Benchmark (Cer et al., 2017) is a collection of
sentence pairs drawn from news headlines, video and image captions, and natural
language inference data. Each pair is human-annotated with a similarity score
from 1 to 5."""

_STSB_CITATION = """\
@article{cer2017semeval,
  title={Semeval-2017 task 1: Semantic textual similarity-multilingual and cross-lingual focused evaluation},
  author={Cer, Daniel and Diab, Mona and Agirre, Eneko and Lopez-Gazpio, Inigo and Specia, Lucia},
  journal={arXiv preprint arXiv:1708.00055},
  year={2017}
}"""

_MNLI_DESCRIPTION = """\
The Multi-Genre Natural Language Inference Corpus is a crowdsourced
collection of sentence pairs with textual entailment annotations. Given a premise sentence
and a hypothesis sentence, the task is to predict whether the premise entails the hypothesis
(entailment), contradicts the hypothesis (contradiction), or neither (neutral). The premise sentences are
gathered from ten different sources, including transcribed speech, fiction, and government reports.
We use the standard test set, for which we obtained private labels from the authors, and evaluate
on both the matched (in-domain) and mismatched (cross-domain) section. We also use and recommend
the SNLI corpus as 550k examples of auxiliary training data."""

_MNLI_CITATION = """\
@InProceedings{N18-1101,
  author = "Williams, Adina and Nangia, Nikita and Bowman, Samuel",
  title = "A Broad-Coverage Challenge Corpus for Sentence Understanding through Inference",
  booktitle = "Proceedings of the 2018 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long Papers)",
  year = "2018",
  publisher = "Association for Computational Linguistics",
  pages = "1112--1122",
  location = "New Orleans, Louisiana",
  url = "http://aclweb.org/anthology/N18-1101"
}
@article{bowman2015large,
  title={A large annotated corpus for learning natural language inference},
  author={Bowman, Samuel R and Angeli, Gabor and Potts, Christopher and Manning, Christopher D},
  journal={arXiv preprint arXiv:1508.05326},
  year={2015}
}"""

_QNLI_DESCRIPTION = """\
The Stanford Question Answering Dataset is a question-answering
dataset consisting of question-paragraph pairs, where one of the sentences in the paragraph (drawn
from Wikipedia) contains the answer to the corresponding question (written by an annotator). We
convert the task into sentence pair classification by forming a pair between each question and each
sentence in the corresponding context, and filtering out pairs with low lexical overlap between the
question and the context sentence. The task is to determine whether the context sentence contains
the answer to the question. This modified version of the original task removes the requirement that
the model select the exact answer, but also removes the simplifying assumptions that the answer
is always present in the input and that lexical overlap is a reliable cue."""

_QNLI_CITATION = """\
@article{rajpurkar2016squad,
  title={Squad: 100,000+ questions for machine comprehension of text},
  author={Rajpurkar, Pranav and Zhang, Jian and Lopyrev, Konstantin and Liang, Percy},
  journal={arXiv preprint arXiv:1606.05250},
  year={2016}
}"""

_WNLI_DESCRIPTION = """\
The Winograd Schema Challenge (Levesque et al., 2011) is a reading comprehension task
in which a system must read a sentence with a pronoun and select the referent of that pronoun from
a list of choices. The examples are manually constructed to foil simple statistical methods: Each
one is contingent on contextual information provided by a single word or phrase in the sentence.
To convert the problem into sentence pair classification, we construct sentence pairs by replacing
the ambiguous pronoun with each possible referent. The task is to predict if the sentence with the
pronoun substituted is entailed by the original sentence. We use a small evaluation set consisting of
new examples derived from fiction books that was shared privately by the authors of the original
corpus. While the included training set is balanced between two classes, the test set is imbalanced
between them (65% not entailment). Also, due to a data quirk, the development set is adversarial:
hypotheses are sometimes shared between training and development examples, so if a model memorizes the
training examples, it will predict the wrong label on the corresponding development set
example. As with QNLI, each example is evaluated separately, so there is not a systematic correspondence
between a model's score on this task and its score on the unconverted original task. We
call the converted dataset WNLI (Winograd NLI)."""

_WNLI_CITATION = """\
@inproceedings{levesque2012winograd,
  title={The winograd schema challenge},
  author={Levesque, Hector and Davis, Ernest and Morgenstern, Leora},
  booktitle={Thirteenth International Conference on the Principles of Knowledge Representation and Reasoning},
  year={2012}
}"""

_SUPER_GLUE_CITATION = """\
@article{wang2019superglue,
  title={SuperGLUE: A Stickier Benchmark for General-Purpose Language Understanding Systems},
  author={Wang, Alex and Pruksachatkun, Yada and Nangia, Nikita and Singh, Amanpreet and Michael, Julian and Hill, Felix and Levy, Omer and Bowman, Samuel R},
  journal={arXiv preprint arXiv:1905.00537},
  year={2019}
}

Note that each SuperGLUE dataset has its own citation. Please see the source to
get the correct citation for each contained dataset.
"""

_SUPER_GLUE_DESCRIPTION = """\
SuperGLUE (https://super.gluebenchmark.com/) is a new benchmark styled after
GLUE with a new set of more difficult language understanding tasks, improved
resources, and a new public leaderboard.

"""

_BOOLQ_DESCRIPTION = """\
BoolQ (Boolean Questions, Clark et al., 2019a) is a QA task where each example consists of a short
passage and a yes/no question about the passage. The questions are provided anonymously and
unsolicited by users of the Google search engine, and afterwards paired with a paragraph from a
Wikipedia article containing the answer. Following the original work, we evaluate with accuracy."""

_CB_DESCRIPTION = """\
The CommitmentBank (De Marneffe et al., 2019) is a corpus of short texts in which at least
one sentence contains an embedded clause. Each of these embedded clauses is annotated with the
degree to which we expect that the person who wrote the text is committed to the truth of the clause.
The resulting task is framed as three-class textual entailment on examples that are drawn from the Wall
Street Journal, fiction from the British National Corpus, and Switchboard. Each example consists
of a premise containing an embedded clause and the corresponding hypothesis is the extraction of
that clause. We use a subset of the data that had inter-annotator agreement above 0.85. The data is
imbalanced (relatively fewer neutral examples), so we evaluate using accuracy and F1, where for
multi-class F1 we compute the unweighted average of the F1 per class."""

_COPA_DESCRIPTION = """\
The Choice Of Plausible Alternatives (COPA, Roemmele et al., 2011) dataset is a causal
reasoning task in which a system is given a premise sentence and two possible alternatives. The
system must choose the alternative which has the more plausible causal relationship with the premise.
The method used for the construction of the alternatives ensures that the task requires causal reasoning
to solve. Examples either deal with alternative possible causes or alternative possible effects of the
premise sentence, accompanied by a simple question disambiguating between the two instance
types for the model. All examples are handcrafted and focus on topics from online blogs and a
photography-related encyclopedia. Following the recommendation of the authors, we evaluate using
accuracy."""

_RTE_DESCRIPTION = """\
The Recognizing Textual Entailment (RTE) datasets come from a series of annual competitions
on textual entailment, the problem of predicting whether a given premise sentence entails a given
hypothesis sentence (also known as natural language inference, NLI). RTE was previously included
in GLUE, and we use the same data and format as before: We merge data from RTE1 (Dagan
et al., 2006), RTE2 (Bar Haim et al., 2006), RTE3 (Giampiccolo et al., 2007), and RTE5 (Bentivogli
et al., 2009). All datasets are combined and converted to two-class classification: entailment and
not_entailment. Of all the GLUE tasks, RTE was among those that benefited from transfer learning
the most, jumping from near random-chance performance (~56%) at the time of GLUE's launch to
85% accuracy (Liu et al., 2019c) at the time of writing. Given the eight point gap with respect to
human performance, however, the task is not yet solved by machines, and we expect the remaining
gap to be difficult to close."""

_BOOLQ_CITATION = """\
@inproceedings{clark2019boolq,
  title={BoolQ: Exploring the Surprising Difficulty of Natural Yes/No Questions},
  author={Clark, Christopher and Lee, Kenton and Chang, Ming-Wei and Kwiatkowski, Tom and Collins, Michael and Toutanova, Kristina},
  booktitle={NAACL},
  year={2019}
}"""

_CB_CITATION = """\
@article{demarneffe_simons_tonhauser_2019,
  title={The CommitmentBank: Investigating projection in naturally occurring discourse},
  journal={Proceedings of Sinn und Bedeutung 23},
  author={De Marneffe, Marie-Catherine and Simons, Mandy and Tonhauser, Judith},
  year={2019}
}"""

_COPA_CITATION = """\
@inproceedings{roemmele2011choice,
  title={Choice of plausible alternatives: An evaluation of commonsense causal reasoning},
  author={Roemmele, Melissa and Bejan, Cosmin Adrian and Gordon, Andrew S},
  booktitle={2011 AAAI Spring Symposium Series},
  year={2011}
}"""

_RTE_CITATION = """\
@inproceedings{dagan2005pascal,
  title={The PASCAL recognising textual entailment challenge},
  author={Dagan, Ido and Glickman, Oren and Magnini, Bernardo},
  booktitle={Machine Learning Challenges Workshop},
  pages={177--190},
  year={2005},
  organization={Springer}
}
@inproceedings{bar2006second,
  title={The second pascal recognising textual entailment challenge},
  author={Bar-Haim, Roy and Dagan, Ido and Dolan, Bill and Ferro, Lisa and Giampiccolo, Danilo and Magnini, Bernardo and Szpektor, Idan},
  booktitle={Proceedings of the second PASCAL challenges workshop on recognising textual entailment},
  volume={6},
  number={1},
  pages={6--4},
  year={2006},
  organization={Venice}
}
@inproceedings{giampiccolo2007third,
  title={The third pascal recognizing textual entailment challenge},
  author={Giampiccolo, Danilo and Magnini, Bernardo and Dagan, Ido and Dolan, Bill},
  booktitle={Proceedings of the ACL-PASCAL workshop on textual entailment and paraphrasing},
  pages={1--9},
  year={2007},
  organization={Association for Computational Linguistics}
}
@inproceedings{bentivogli2009fifth,
  title={The Fifth PASCAL Recognizing Textual Entailment Challenge.},
  author={Bentivogli, Luisa and Clark, Peter and Dagan, Ido and Giampiccolo, Danilo},
  booktitle={TAC},
  year={2009}
}"""

_URL = "https://huggingface.co/datasets/KBLab/overlim/resolve/main/data/" |
|
_TASKS = { |
|
"boolq": "boolq.tar.gz", |
|
"cb": "cb.tar.gz", |
|
"copa": "copa.tar.gz", |
|
"mnli": "mnli.tar.gz", |
|
"mrpc": "mrpc.tar.gz", |
|
"qnli": "qnli.tar.gz", |
|
"qqp": "qqp.tar.gz", |
|
"rte": "rte.tar.gz", |
|
"sst": "sst.tar.gz", |
|
"stsb": "stsb.tar.gz", |
|
"wnli": "wnli.tar.gz" |
|
} |
|
_LANGUAGES = {"sv", "da", "nb"} |
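# For a given config, one archive is fetched from
# _URL + "<language>/" + _TASKS[<task>], e.g.
# https://huggingface.co/datasets/KBLab/overlim/resolve/main/data/sv/sst.tar.gz,
# and is expected to unpack into <task>/{train,val,test}.jsonl
# (see OverLim._split_generators below).
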
class OverLimConfig(datasets.BuilderConfig):
    """BuilderConfig for OverLim."""

    def __init__(self, name, description, features, citation, language, label_classes=("False", "True"), **kwargs):
        """BuilderConfig for OverLim.

        Args:
          name: string, name of the task (a key of `_TASKS`).
          description: string, description of the task.
          features: list of strings, input feature names; "label" is appended automatically.
          citation: string, citation for the task.
          language: string, language code (one of `_LANGUAGES`).
          label_classes: sequence of strings, the label class names.
          **kwargs: keyword arguments forwarded to the parent `BuilderConfig`.
        """
        self.full_name = name + "_" + language
        super(OverLimConfig, self).__init__(name=self.full_name, version=datasets.Version("1.0.2"), **kwargs)
        self.features = features + ["label"]
        self.label_classes = label_classes
        self.citation = citation
        self.description = description
        self.task_name = name
        self.language = language
        self.data_url = _TASKS[name]
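
# Illustrative example (values taken from the configs defined below):
# OverLimConfig(name="sst", description=_SST_DESCRIPTION, features=["text"],
#               label_classes=["negative", "positive"], citation=_SST_CITATION,
#               language="sv")
# registers itself under the config name "sst_sv" and downloads "sst.tar.gz".
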
class OverLim(datasets.GeneratorBasedBuilder):
    """OverLim."""

    BUILDER_CONFIGS = [[
        OverLimConfig(
            name="boolq",
            description=_BOOLQ_DESCRIPTION,
            features=["question", "passage"],
            label_classes=["False", "True"],
            citation=_BOOLQ_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="cb",
            description=_CB_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "contradiction", "neutral"],
            citation=_CB_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="copa",
            description=_COPA_DESCRIPTION,
            label_classes=["choice1", "choice2"],
            features=["premise", "choice1", "choice2", "question"],
            citation=_COPA_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="rte",
            description=_RTE_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "not_entailment"],
            citation=_RTE_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="qqp",
            description=_QQP_DESCRIPTION,
            features=["text_a", "text_b"],
            label_classes=["not_duplicate", "duplicate"],
            citation=_QQP_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="qnli",
            description=_QNLI_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "not_entailment"],
            citation=_QNLI_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="stsb",
            description=_STSB_DESCRIPTION,
            features=["text_a", "text_b"],
            citation=_STSB_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="mnli",
            description=_MNLI_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["entailment", "neutral", "contradiction"],
            citation=_MNLI_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="mrpc",
            description=_MRPC_DESCRIPTION,
            features=["text_a", "text_b"],
            label_classes=["not_equivalent", "equivalent"],
            citation=_MRPC_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="wnli",
            description=_WNLI_DESCRIPTION,
            features=["premise", "hypothesis"],
            label_classes=["not_entailment", "entailment"],
            citation=_WNLI_CITATION,
            language=lang,
        ),
        OverLimConfig(
            name="sst",
            description=_SST_DESCRIPTION,
            features=["text"],
            label_classes=["negative", "positive"],
            citation=_SST_CITATION,
            language=lang,
        ),
    ] for lang in _LANGUAGES]
    BUILDER_CONFIGS = [element for inner in BUILDER_CONFIGS for element in inner]
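    # One config is registered per (task, language) pair, e.g. "boolq_sv",
    # "rte_da", "sst_nb": 11 tasks x 3 languages = 33 configs in total.
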
    def _info(self):
        features = {feature: datasets.Value("string") for feature in self.config.features}
        features["idx"] = datasets.Value("int32")

        return datasets.DatasetInfo(
            description=_GLUE_DESCRIPTION + self.config.description,
            features=datasets.Features(features),
            homepage=_HOMEPAGE,
            citation=self.config.citation + "\n" + _SUPER_GLUE_CITATION,
        )
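    # For the "sst_sv" config, for example, the resulting schema is
    # {"text": string, "label": string, "idx": int32}: every task feature,
    # including the label, is exposed as a plain string column.
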
    def _split_generators(self, dl_manager):
        # Build the download URL with "/" rather than os.path.join so the script
        # also works on platforms whose path separator is not "/".
        dl_dir = dl_manager.download_and_extract(_URL + self.config.language + "/" + self.config.data_url)
        dl_dir = os.path.join(dl_dir, self.config.task_name)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "train.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.VALIDATION,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "val.jsonl"),
                },
            ),
            datasets.SplitGenerator(
                name=datasets.Split.TEST,
                gen_kwargs={
                    "data_file": os.path.join(dl_dir, "test.jsonl"),
                },
            ),
        ]
    def _generate_examples(self, data_file):
        with open(data_file, encoding="utf-8") as f:
            for line in f:
                row = json.loads(line)
                example = {feature: row[feature] for feature in self.config.features}
                example["idx"] = row["idx"]

                # Compare against the task name: self.config.name carries the
                # language suffix (e.g. "copa_sv") and would never equal "copa".
                if self.config.task_name == "copa":
                    example["label"] = "choice2" if row["label"] else "choice1"
                else:
                    example["label"] = _cast_label(row["label"])
                yield example["idx"], example
def _cast_label(label):
    """Converts the label into the appropriate string version."""
    if isinstance(label, str):
        return label
    elif isinstance(label, bool):
        return "True" if label else "False"
    return label
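
# Minimal usage sketch (assuming this script is published on the Hugging Face Hub
# as "KBLab/overlim", which the _URL above points at); configs are addressed by
# their "<task>_<language>" name:
#
#     from datasets import load_dataset
#     sst_sv = load_dataset("KBLab/overlim", "sst_sv")
#     print(sst_sv["train"][0])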