albertvillanova HF staff committed on
Commit
a4fdb7b
1 Parent(s): 17289f0

Delete loading script

Files changed (1)
  1. kor_nli.py +0 -121
kor_nli.py DELETED
@@ -1,121 +0,0 @@
-"""TODO(kor_nli): Add a description here."""
-
-
-import os
-
-import datasets
-
-
-# TODO(kor_nli): BibTeX citation
-_CITATION = """\
-@article{ham2020kornli,
-  title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
-  author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
-  journal={arXiv preprint arXiv:2004.03289},
-  year={2020}
-}
-"""
-
-# TODO(kor_nli):
-_DESCRIPTION = """ Korean Natural Language Inference datasets
-"""
-_URL = "data.zip"
-
-
-class KorNLIConfig(datasets.BuilderConfig):
-    """BuilderConfig for KorNLI."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for KorNLI.
-
-        Args:
-
-          **kwargs: keyword arguments forwarded to super.
-        """
-        # Version 1.1.0 remove empty document and summary strings.
-        super(KorNLIConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-
-
-class KorNli(datasets.GeneratorBasedBuilder):
-    """TODO(kor_nli): Short description of my dataset."""
-
-    # TODO(kor_nli): Set up version.
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        KorNLIConfig(name="multi_nli", description="Korean multi NLI datasets"),
-        KorNLIConfig(name="snli", description="Korean SNLI dataset"),
-        KorNLIConfig(name="xnli", description="Korean XNLI dataset"),
-    ]
-
-    def _info(self):
-        # TODO(kor_nli): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "premise": datasets.Value("string"),
-                    "hypothesis": datasets.Value("string"),
-                    "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/kakaobrain/KorNLUDatasets",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(kor_nli): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        dl_dir = os.path.join(dl_dir, "KorNLI")
-        if self.config.name == "multi_nli":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "multinli.train.ko.tsv")},
-                ),
-            ]
-        elif self.config.name == "snli":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "snli_1.0_train.ko.tsv")},
-                ),
-            ]
-        else:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.dev.ko.tsv")},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.test.ko.tsv")},
-                ),
-            ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(kor_nli): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            next(f)  # skip headers
-            columns = ("premise", "hypothesis", "label")
-            for id_, row in enumerate(f):
-                row = row.strip().split("\t")
-                if len(row) != 3:
-                    continue
-                row = dict(zip(columns, row))
-                yield id_, row
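
With the loading script deleted, load_dataset no longer executes kor_nli.py; the repository is expected to serve its data files directly (for example, as Hub-hosted Parquet). Below is a minimal usage sketch under that assumption, reusing the config names (multi_nli, snli, xnli) and features (premise, hypothesis, label) from the deleted script; the exact split layout after the migration is an assumption, not confirmed by this commit.

# A minimal sketch, assuming the repo now ships data files the Hub can read
# directly, so no custom loading script is needed.
from datasets import load_dataset

# Config names match the deleted script's BUILDER_CONFIGS.
multi_nli = load_dataset("kor_nli", "multi_nli")  # per the old script: train split only
xnli = load_dataset("kor_nli", "xnli")            # per the old script: validation and test

# Features should be unchanged: premise, hypothesis, and a 3-way ClassLabel
# (0=entailment, 1=neutral, 2=contradiction, in the order the script declared).
print(multi_nli["train"][0])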