Datasets:
Update files from the datasets library (from 1.16.0)
Release notes: https://github.com/huggingface/datasets/releases/tag/1.16.0
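
The atomic.py update below follows the streaming-friendly pattern used by the 1.16.0 loading scripts: download the raw archive with dl_manager.download() and read its members lazily with dl_manager.iter_archive(), instead of extracting everything to disk (which is why the os import goes away). A minimal sketch of that pattern, simplified from the diff below; process_row is a hypothetical stand-in for the row parsing in _generate_examples:

archive = dl_manager.download(my_urls)
# iter_archive() yields (path_inside_archive, binary_file_object) pairs lazily,
# so the tarball never needs to be extracted to local disk.
for path, f in dl_manager.iter_archive(archive):
    if path == "v4_atomic_trn.csv":     # select the CSV member for this split
        for line in f:                  # f is read line by line as raw bytes
            process_row(line.decode("utf-8"))
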
README.md
CHANGED
@@ -1,4 +1,5 @@
 ---
+pretty_name: ATOMIC
 annotations_creators:
 - crowdsourced
 language_creators:
atomic.py
CHANGED
@@ -16,7 +16,6 @@
 
 
 import json
-import os
 
 import datasets
 
@@ -93,56 +92,62 @@ class Atomic(datasets.GeneratorBasedBuilder):
     def _split_generators(self, dl_manager):
         """Returns SplitGenerators."""
         my_urls = _URLs[self.config.name]
-        …
+        archive = dl_manager.download(my_urls)
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
                 gen_kwargs={
-                    "filepath": …
-                    "…
+                    "filepath": "v4_atomic_trn.csv",
+                    "files": dl_manager.iter_archive(archive),
                 },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.TEST,
-                gen_kwargs={…
+                gen_kwargs={
+                    "filepath": "v4_atomic_tst.csv",
+                    "files": dl_manager.iter_archive(archive),
+                },
             ),
             datasets.SplitGenerator(
                 name=datasets.Split.VALIDATION,
                 gen_kwargs={
-                    "filepath": …
-                    "…
+                    "filepath": "v4_atomic_dev.csv",
+                    "files": dl_manager.iter_archive(archive),
                },
             ),
         ]
 
-    def _generate_examples(self, filepath, …
+    def _generate_examples(self, filepath, files):
         """Yields examples from the Atomic dataset."""
 
-        … (28 removed lines not shown)
+        for path, f in files:
+            if path == filepath:
+                for id_, row in enumerate(f):
+                    row = row.decode("utf-8")
+                    if row.startswith("event"):
+                        continue
+                    row = row.replace('"[', "[").replace(']"', "]")
+                    sent, rest = row.split("[", 1)
+                    sent = sent.strip(', "')
+                    rest = "[" + rest
+                    rest = rest.replace('""', '"').replace('\\\\"]', '"]')
+                    rest = rest.split(",")
+                    rest[-1] = '"' + rest[-1].strip() + '"'
+                    rest = ",".join(rest)
+                    row = '["' + sent + '",' + rest + "]"
+                    row = json.loads(row)
+                    yield id_, {
+                        "event": str(row[0]),
+                        "oEffect": row[1],
+                        "oReact": row[2],
+                        "oWant": row[3],
+                        "xAttr": row[4],
+                        "xEffect": row[5],
+                        "xIntent": row[6],
+                        "xNeed": row[7],
+                        "xReact": row[8],
+                        "xWant": row[9],
+                        "prefix": row[10],
+                        "split": str(row[11]),
+                    }
+                break
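
Because the loader now reads the CSV files straight out of the downloaded archive, the dataset can also be consumed in streaming mode. A usage sketch, assuming the canonical "atomic" dataset id on the Hub and its default configuration:

from datasets import load_dataset

# Regular mode: download the archive once, then generate examples from its members.
ds = load_dataset("atomic", split="train")

# Streaming mode: rows are parsed on the fly by the _generate_examples shown above,
# without extracting the archive to local disk.
stream = load_dataset("atomic", split="train", streaming=True)
print(next(iter(stream)))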