VictorSanh committed · Commit b6b3cf8 · 1 Parent(s): 1688659
cleaning
P3.py CHANGED
@@ -78,7 +78,7 @@ _DATA_PATH = "data"
 #     )
 #     return ds
 
-def load_cached_task(features_file, tfrecord, split):
+def load_cached_task(features_file, tfrecord):
     # # TODO(Victor): this info.*.json is actually done twice... -> factorize
     # with tf.io.gfile.GFile(os.path.join(cache_dir, f"info.{split}.json")) as f:
     with tf.io.gfile.GFile(features_file) as f:
@@ -100,9 +100,6 @@ def load_cached_task(features_file, tfrecord, split):
         feat: _feature_config(**desc) for feat, desc in features.items()
     }
 
-    # tfrecords = os.path.join(
-    #     cache_dir, f"{split}.tfrecord-*-of-*{split_info['num_shards']}"
-    # )
     ds = tf.data.TFRecordDataset(tf.io.gfile.glob([tfrecord]))
     ds = ds.map(
         lambda pb: tf.io.parse_single_example(pb, feature_description),
@@ -116,6 +113,7 @@ def load_cached_task(features_file, tfrecord, split):
     )
     return ds
 
+
 def find_task_splits_and_features():
     """Find the available tasks under ./data and their available splits and features."""
     task_and_their_splits = defaultdict(dict)
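For readers following the change: after this commit, load_cached_task takes the features JSON path and the tfrecord glob directly, instead of rebuilding them from cache_dir and split. A minimal usage sketch, assuming the script's load_cached_task is in scope; the paths here are hypothetical stand-ins for what find_task_splits_and_features() discovers under ./data:

# Hypothetical paths for illustration; the real layout lives under _DATA_PATH.
features_file = "data/some_task/info.train.json"
tfrecord = "data/some_task/train.tfrecord-*"

ds = load_cached_task(features_file, tfrecord)
# Peek at one parsed record to see the available feature names.
for ex in ds.take(1).as_numpy_iterator():
    print(sorted(ex.keys()))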
@@ -239,7 +237,6 @@ class P3(datasets.GeneratorBasedBuilder):
                     gen_kwargs={
                         "features_file": data_dir[task_name][split_name]["features_file"],
                         "tfrecord": data_dir[task_name][split_name]["tfrecord"],
-                        "split": split_name,
                     }
                 )
             )
@@ -251,7 +248,6 @@ class P3(datasets.GeneratorBasedBuilder):
                     gen_kwargs={
                         "features_file": data_dir[task_name][split_name]["features_file"],
                         "tfrecord": data_dir[task_name][split_name]["tfrecord"],
-                        "split": split_name,
                     }
                 )
             )
@@ -263,7 +259,6 @@ class P3(datasets.GeneratorBasedBuilder):
                     gen_kwargs={
                         "features_file": data_dir[task_name][split_name]["features_file"],
                         "tfrecord": data_dir[task_name][split_name]["tfrecord"],
-                        "split": split_name,
                     }
                 )
             )
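The three hunks above make the same fix in each split branch: gen_kwargs must mirror the parameters of _generate_examples, so the now-unused "split" key is dropped everywhere. A sketch of the resulting call, with placeholder values standing in for the data_dir lookups:

import datasets

split_generators = [
    datasets.SplitGenerator(
        name=datasets.Split.TRAIN,
        gen_kwargs={
            # Placeholder values; the script reads these from data_dir[task_name][split_name].
            "features_file": "data/some_task/info.train.json",
            "tfrecord": "data/some_task/train.tfrecord-*",
            # No "split" key anymore: _generate_examples no longer accepts it.
        },
    )
]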
@@ -276,14 +271,13 @@ class P3(datasets.GeneratorBasedBuilder):
                     gen_kwargs={
                         "features_file": data_dir[task_name][special_split_name]["features_file"],
                         "tfrecord": data_dir[task_name][special_split_name]["tfrecord"],
-                        "split": special_split_name,
                     }
                 )
             )
         return split_generators
 
 
-    def _generate_examples(self, features_file, tfrecord, split):
+    def _generate_examples(self, features_file, tfrecord):
         """This function returns the examples in the raw (text) form."""
         _FEAT_MAPPING_FUNCTIONS = {
             "answer_choices": lambda x: [choice.decode("utf-8") for choice in x],
@@ -297,7 +291,7 @@ class P3(datasets.GeneratorBasedBuilder):
         }
 
         key = 0
-        ds = load_cached_task(features_file, tfrecord, split)
+        ds = load_cached_task(features_file, tfrecord)
         for ex in ds.as_numpy_iterator():
             ex_dict = {}
             for feat_name, feat_value in ex.items():