albertvillanova (HF staff) committed
Commit 3e0e462
1 Parent(s): 67f64b8

Convert dataset to Parquet (#7)


- Convert dataset to Parquet (ec6839eed447dfffbd0bfb7551701b7dcadd44aa)
- Add 'snli' config data files (cdf18d97fcea5bf4a6d7a6e9365ea20d4024eaae)
- Add 'xnli' config data files (17289f0bafe71b2cc393b7d1530cf24db6f18f47)
- Delete loading script (a4fdb7b517f363a92fd8cf5a447301115c6a4d81)
- Delete data file (a2a6a1a066884fb48ec27a1e66152f0184233c6e)

README.md CHANGED
@@ -39,10 +39,10 @@ dataset_info:
           '2': contradiction
   splits:
   - name: train
-    num_bytes: 84729207
+    num_bytes: 84728887
     num_examples: 392702
-  download_size: 42113232
-  dataset_size: 84729207
+  download_size: 54693610
+  dataset_size: 84728887
 - config_name: snli
   features:
   - name: premise
@@ -58,10 +58,10 @@ dataset_info:
           '2': contradiction
   splits:
   - name: train
-    num_bytes: 80137097
+    num_bytes: 80136649
     num_examples: 550152
-  download_size: 42113232
-  dataset_size: 80137097
+  download_size: 22015955
+  dataset_size: 80136649
 - config_name: xnli
   features:
   - name: premise
@@ -77,13 +77,28 @@ dataset_info:
           '2': contradiction
   splits:
   - name: validation
-    num_bytes: 518830
+    num_bytes: 518822
     num_examples: 2490
   - name: test
-    num_bytes: 1047437
+    num_bytes: 1047429
     num_examples: 5010
-  download_size: 42113232
-  dataset_size: 1566267
+  download_size: 529321
+  dataset_size: 1566251
+configs:
+- config_name: multi_nli
+  data_files:
+  - split: train
+    path: multi_nli/train-*
+- config_name: snli
+  data_files:
+  - split: train
+    path: snli/train-*
+- config_name: xnli
+  data_files:
+  - split: validation
+    path: xnli/validation-*
+  - split: test
+    path: xnli/test-*
 ---
 
 # Dataset Card for "kor_nli"
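With the `configs` mapping above pointing each configuration at its Parquet data files, the dataset no longer needs the custom loading script deleted below. A minimal usage sketch, assuming the dataset is published on the Hub under the id `kor_nli` and a `datasets` release recent enough to resolve data files from the README YAML:

```python
from datasets import load_dataset

# Each config name maps directly to its Parquet shards via the `configs` block.
multi_nli = load_dataset("kor_nli", "multi_nli")  # train split only
snli = load_dataset("kor_nli", "snli")            # train split only
xnli = load_dataset("kor_nli", "xnli")            # validation and test splits

print(multi_nli["train"][0])
# {'premise': '...', 'hypothesis': '...', 'label': 0}
```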
kor_nli.py DELETED
@@ -1,121 +0,0 @@
-"""TODO(kor_nli): Add a description here."""
-
-
-import os
-
-import datasets
-
-
-# TODO(kor_nli): BibTeX citation
-_CITATION = """\
-@article{ham2020kornli,
-  title={KorNLI and KorSTS: New Benchmark Datasets for Korean Natural Language Understanding},
-  author={Ham, Jiyeon and Choe, Yo Joong and Park, Kyubyong and Choi, Ilji and Soh, Hyungjoon},
-  journal={arXiv preprint arXiv:2004.03289},
-  year={2020}
-}
-"""
-
-# TODO(kor_nli):
-_DESCRIPTION = """ Korean Natural Language Inference datasets
-"""
-_URL = "data.zip"
-
-
-class KorNLIConfig(datasets.BuilderConfig):
-    """BuilderConfig for KorNLI."""
-
-    def __init__(self, **kwargs):
-        """BuilderConfig for KorNLI.
-
-        Args:
-
-          **kwargs: keyword arguments forwarded to super.
-        """
-        # Version 1.1.0 remove empty document and summary strings.
-        super(KorNLIConfig, self).__init__(version=datasets.Version("1.0.0"), **kwargs)
-
-
-class KorNli(datasets.GeneratorBasedBuilder):
-    """TODO(kor_nli): Short description of my dataset."""
-
-    # TODO(kor_nli): Set up version.
-    VERSION = datasets.Version("1.0.0")
-    BUILDER_CONFIGS = [
-        KorNLIConfig(name="multi_nli", description="Korean multi NLI datasets"),
-        KorNLIConfig(name="snli", description="Korean SNLI dataset"),
-        KorNLIConfig(name="xnli", description="Korean XNLI dataset"),
-    ]
-
-    def _info(self):
-        # TODO(kor_nli): Specifies the datasets.DatasetInfo object
-        return datasets.DatasetInfo(
-            # This is the description that will appear on the datasets page.
-            description=_DESCRIPTION,
-            # datasets.features.FeatureConnectors
-            features=datasets.Features(
-                {
-                    # These are the features of your dataset like images, labels ...
-                    "premise": datasets.Value("string"),
-                    "hypothesis": datasets.Value("string"),
-                    "label": datasets.ClassLabel(names=["entailment", "neutral", "contradiction"]),
-                }
-            ),
-            # If there's a common (input, target) tuple from the features,
-            # specify them here. They'll be used if as_supervised=True in
-            # builder.as_dataset.
-            supervised_keys=None,
-            # Homepage of the dataset for documentation
-            homepage="https://github.com/kakaobrain/KorNLUDatasets",
-            citation=_CITATION,
-        )
-
-    def _split_generators(self, dl_manager):
-        """Returns SplitGenerators."""
-        # TODO(kor_nli): Downloads the data and defines the splits
-        # dl_manager is a datasets.download.DownloadManager that can be used to
-        # download and extract URLs
-        dl_dir = dl_manager.download_and_extract(_URL)
-        dl_dir = os.path.join(dl_dir, "KorNLI")
-        if self.config.name == "multi_nli":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "multinli.train.ko.tsv")},
-                ),
-            ]
-        elif self.config.name == "snli":
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.TRAIN,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "snli_1.0_train.ko.tsv")},
-                ),
-            ]
-        else:
-            return [
-                datasets.SplitGenerator(
-                    name=datasets.Split.VALIDATION,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.dev.ko.tsv")},
-                ),
-                datasets.SplitGenerator(
-                    name=datasets.Split.TEST,
-                    # These kwargs will be passed to _generate_examples
-                    gen_kwargs={"filepath": os.path.join(dl_dir, "xnli.test.ko.tsv")},
-                ),
-            ]
-
-    def _generate_examples(self, filepath):
-        """Yields examples."""
-        # TODO(kor_nli): Yields (key, example) tuples from the dataset
-        with open(filepath, encoding="utf-8") as f:
-            next(f)  # skip headers
-            columns = ("premise", "hypothesis", "label")
-            for id_, row in enumerate(f):
-                row = row.strip().split("\t")
-                if len(row) != 3:
-                    continue
-                row = dict(zip(columns, row))
-                yield id_, row
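For reference, the deleted `_generate_examples` skipped the header row of each tab-separated file and yielded `premise` / `hypothesis` / `label` triples. A rough standalone equivalent for anyone still working from the original TSV files (the path in the usage comment is illustrative, not part of this repository):

```python
def read_kornli_tsv(filepath):
    """Yield premise/hypothesis/label dicts from a KorNLI-style TSV file."""
    columns = ("premise", "hypothesis", "label")
    with open(filepath, encoding="utf-8") as f:
        next(f)  # skip the header line, as the deleted script did
        for line in f:
            row = line.strip().split("\t")
            if len(row) != 3:  # drop malformed rows, mirroring the old behavior
                continue
            yield dict(zip(columns, row))

# Hypothetical local path:
# for example in read_kornli_tsv("KorNLI/multinli.train.ko.tsv"):
#     print(example)
```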
data.zip → multi_nli/train-00000-of-00001.parquet RENAMED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0e1f08af79f96ec66293df406ca2475be8ffffccb95115c9441cd160fd49f89
-size 41666635
+oid sha256:8de5f800e4c9d0fb89bd6a36a800f7ec26be2a97f50c32f2aff3c197ca155f66
+size 54693610
snli/train-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dd6c2d95ae7b8979e2a89c0d05b208f38d480ca4e23f8249122e9b40116b5572
+size 22015955
xnli/test-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:2ebba3632ffac4d05a18e76629486eb88c584ae5459d15181c295341e6af659b
+size 351479
xnli/validation-00000-of-00001.parquet ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ef903c790a16149f167367a01775aaf7e5d6d1a74deb89400eec4f3f29b4aec8
+size 177842
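The three files above are Git LFS pointers; the `oid` and `size` lines describe the actual Parquet shards stored in LFS. A small sketch of inspecting one shard directly with pandas, assuming the repository has been cloned with `git lfs` so the real Parquet content (not just the pointer) is present locally; the path matches the data files referenced in the README `configs`:

```python
import pandas as pd  # requires pyarrow or fastparquet for Parquet support

# Read the XNLI validation shard added in this commit.
df = pd.read_parquet("xnli/validation-00000-of-00001.parquet")

print(df.shape)    # expected (2490, 3) per the README split metadata
print(df.columns)  # premise, hypothesis, label
print(df.head())
```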