Change pyarrow to pandas for dataset preview
diffusiondb.py  CHANGED  (+11 -15)
@@ -11,8 +11,6 @@ from os.path import join, basename
 from huggingface_hub import hf_hub_url
 
 import datasets
-import pyarrow as pa
-import pyarrow.parquet as pq
 
 # Find for instance the citation on arxiv or on the dataset repo/website
 _CITATION = """\
@@ -359,11 +357,12 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
             cur_id = int(re.sub(r"part-(\d+)\.json", r"\1", basename(path)))
             part_ids.append(cur_id)
 
-        metadata_table = pq.read_table(
+        # We have to use pandas here to make the dataset preview work (it
+        # uses streaming mode)
+        metadata_table = pd.read_parquet(
             metadata_path,
             filters=[("part_id", "in", part_ids)],
         )
-        print(metadata_table.shape)
 
         # Iterate through all extracted zip folders for images
         for k in range(num_data_dirs):
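The `filters` argument carries over unchanged: `pandas.read_parquet` forwards it to the underlying pyarrow engine, so only rows whose `part_id` is in the downloaded subset are materialized. A minimal sketch of the new call with made-up inputs (and assuming `import pandas as pd` already exists elsewhere in the script, since this commit removes the pyarrow imports without adding a pandas one):

import pandas as pd

# Hypothetical inputs for illustration only
metadata_path = "metadata.parquet"
part_ids = [1, 2, 3]

# pandas passes `filters` through to the pyarrow engine, which applies
# the predicate while reading instead of loading the whole table first
metadata_table = pd.read_parquet(
    metadata_path,
    filters=[("part_id", "in", part_ids)],
)
print(metadata_table.shape)  # (matching rows, number of columns)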
@@ -376,11 +375,8 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                 img_params = json_data[img_name]
                 img_path = join(cur_data_dir, img_name)
 
-                # Query the
-                row_mask = pa.compute.equal(
-                    metadata_table.column("image_name"), img_name
-                )
-                query_result = metadata_table.filter(row_mask)
+                # Query the metadata
+                query_result = metadata_table.query(f'`image_name` == "{img_name}"')
 
                 # Yields examples as (key, example) tuples
                 yield img_name, {
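`DataFrame.query` collapses the old mask-and-filter pair into one expression; the backticks let the column be named inside the query string, and the f-string splices in the image name. A small self-contained sketch with toy data (not the real metadata schema):

import pandas as pd

# Toy stand-in for the real metadata table
metadata_table = pd.DataFrame(
    {"image_name": ["a.png", "b.png"], "width": [512, 768]}
)

img_name = "b.png"
query_result = metadata_table.query(f'`image_name` == "{img_name}"')
print(query_result["width"].to_list()[0])  # 768

# Equivalent without string interpolation: `@` references a local variable
query_result = metadata_table.query("image_name == @img_name")

The `@` form would also sidestep quoting problems if an image name ever contained a double quote.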
@@ -393,10 +389,10 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
                     "step": int(img_params["st"]),
                     "cfg": float(img_params["c"]),
                     "sampler": img_params["sa"],
-                    "width": query_result["width"][0],
-                    "height": query_result["height"][0],
-                    "user_name": query_result["user_name"][0],
-                    "timestamp": query_result["timestamp"][0],
-                    "image_nsfw": query_result["image_nsfw"][0],
-                    "prompt_nsfw": query_result["prompt_nsfw"][0],
+                    "width": query_result["width"].to_list()[0],
+                    "height": query_result["height"].to_list()[0],
+                    "user_name": query_result["user_name"].to_list()[0],
+                    "timestamp": query_result["timestamp"].to_list()[0],
+                    "image_nsfw": query_result["image_nsfw"].to_list()[0],
+                    "prompt_nsfw": query_result["prompt_nsfw"].to_list()[0],
                 }
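`.to_list()[0]` pulls the single matching cell out as a plain Python value: `Series.to_list` converts numpy scalars to native `int`/`float`/`str`, whereas indexing with `.iloc[0]` can hand back a numpy scalar. A two-line sketch of the difference:

import pandas as pd

s = pd.Series([512])
print(type(s.to_list()[0]))  # <class 'int'>: a native Python value
print(type(s.iloc[0]))       # <class 'numpy.int64'>: a numpy scalar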