Datasets: Add a new text_only config option
diffusiondb.py +91 -35
diffusiondb.py
CHANGED
@@ -3,8 +3,11 @@
 """Loading script for DiffusionDB."""
 
 import numpy as np
+import pandas as pd
+
 from json import load, dump
 from os.path import join, basename
+from huggingface_hub import hf_hub_url
 
 import datasets
 
@@ -34,14 +37,20 @@ _LICENSE = "CC0 1.0"
 _VERSION = datasets.Version("0.9.0")
 
 # Programmatically generate the URLs for different parts
+# hf_hub_url() provides a more flexible way to resolve the file URLs
 # https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip
 _URLS = {}
 _PART_IDS = range(1, 2001)
 
 for i in _PART_IDS:
-    _URLS[
-        i
-    ] = f"https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-{i:06}.zip"
+    _URLS[i] = hf_hub_url(
+        "datasets/poloclub/diffusiondb", filename=f"images/part-{i:06}.zip"
+    )
+
+# Add the metadata parquet URL as well
+_URLS["metadata"] = hf_hub_url(
+    "datasets/poloclub/diffusiondb", filename="metadata.parquet"
+)
 
 
 class DiffusionDBConfig(datasets.BuilderConfig):
@@ -107,22 +116,46 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
             ),
         )
 
+    # We also provide a text-only option, which loads the metadata parquet file
+    BUILDER_CONFIGS.append(
+        DiffusionDBConfig(
+            name="text_only",
+            part_ids=[],
+            description="Only include all prompts and parameters (no image)",
+        ),
+    )
+
     # Default to only load 1k random images
     DEFAULT_CONFIG_NAME = "random_1k"
 
     def _info(self):
        """Specify the information of DiffusionDB."""
 
-        features = datasets.Features(
-            {
-                "image": datasets.Image(),
-                "prompt": datasets.Value("string"),
-                "seed": datasets.Value("int64"),
-                "step": datasets.Value("int64"),
-                "cfg": datasets.Value("float32"),
-                "sampler": datasets.Value("string"),
-            },
-        )
+        if self.config.name == "text_only":
+            features = datasets.Features(
+                {
+                    "image_name": datasets.Value("string"),
+                    "prompt": datasets.Value("string"),
+                    "part_id": datasets.Value("int64"),
+                    "seed": datasets.Value("int64"),
+                    "step": datasets.Value("int64"),
+                    "cfg": datasets.Value("float32"),
+                    "sampler": datasets.Value("string"),
+                },
+            )
+
+        else:
+            features = datasets.Features(
+                {
+                    "image": datasets.Image(),
+                    "prompt": datasets.Value("string"),
+                    "seed": datasets.Value("int64"),
+                    "step": datasets.Value("int64"),
+                    "cfg": datasets.Value("float32"),
+                    "sampler": datasets.Value("string"),
+                },
+            )
+
         return datasets.DatasetInfo(
             description=_DESCRIPTION,
             features=features,
@@ -154,6 +187,11 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
             data_dirs.append(data_dir)
             json_paths.append(join(data_dir, f"part-{cur_part_id:06}.json"))
 
+        # If we are in text_only mode, we only need to download the parquet file
+        # For convenience, we save the parquet path in `data_dirs`
+        if self.config.name == "text_only":
+            data_dirs = [dl_manager.download(_URLS["metadata"])]
+
         return [
             datasets.SplitGenerator(
                 name=datasets.Split.TRAIN,
@@ -171,26 +209,44 @@ class DiffusionDB(datasets.GeneratorBasedBuilder):
         # The `key` is for legacy reasons (tfds) and is not important in itself,
         # but must be unique for each example.
 
-        # Iterate through all extracted zip folders for images
-        num_data_dirs = len(data_dirs)
-        assert num_data_dirs == len(json_paths)
-
-        for k in range(num_data_dirs):
-            cur_data_dir = data_dirs[k]
-            cur_json_path = json_paths[k]
-
-            json_data = load(open(cur_json_path, "r", encoding="utf8"))
-
-            for img_name in json_data:
-                img_params = json_data[img_name]
-                img_path = join(cur_data_dir, img_name)
-
-                # Yields examples as (key, example) tuples
-                yield img_name, {
-                    "image": {"path": img_path, "bytes": open(img_path, "rb").read()},
-                    "prompt": img_params["p"],
-                    "seed": int(img_params["se"]),
-                    "step": int(img_params["st"]),
-                    "cfg": float(img_params["c"]),
-                    "sampler": img_params["sa"],
+        # Load the metadata parquet file if the config is text_only
+        if self.config.name == "text_only":
+            metadata_df = pd.read_parquet(data_dirs[0])
+            for _, row in metadata_df.iterrows():
+                yield row["image_name"], {
+                    "image_name": row["image_name"],
+                    "prompt": row["prompt"],
+                    "part_id": row["part_id"],
+                    "seed": row["seed"],
+                    "step": row["step"],
+                    "cfg": row["cfg"],
+                    "sampler": row["sampler"],
                 }
+
+        else:
+            # Iterate through all extracted zip folders for images
+            num_data_dirs = len(data_dirs)
+            assert num_data_dirs == len(json_paths)
+
+            for k in range(num_data_dirs):
+                cur_data_dir = data_dirs[k]
+                cur_json_path = json_paths[k]
+
+                json_data = load(open(cur_json_path, "r", encoding="utf8"))
+
+                for img_name in json_data:
+                    img_params = json_data[img_name]
+                    img_path = join(cur_data_dir, img_name)
+
+                    # Yields examples as (key, example) tuples
+                    yield img_name, {
+                        "image": {
+                            "path": img_path,
+                            "bytes": open(img_path, "rb").read(),
+                        },
+                        "prompt": img_params["p"],
+                        "seed": int(img_params["se"]),
+                        "step": int(img_params["st"]),
+                        "cfg": float(img_params["c"]),
+                        "sampler": img_params["sa"],
+                    }
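Note: hf_hub_url() builds the same resolve/main URL that was previously hard-coded. A minimal sketch of what the loop above produces, assuming the "datasets/" repo-id prefix used in the script is accepted by the installed huggingface_hub version:

    from huggingface_hub import hf_hub_url

    # Resolve the download URL for the first image part, mirroring the loading script
    url = hf_hub_url("datasets/poloclub/diffusiondb", filename="images/part-000001.zip")
    print(url)
    # -> https://huggingface.co/datasets/poloclub/diffusiondb/resolve/main/images/part-000001.zip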
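With the new config, prompts and parameters can be loaded without downloading any image zips: part_ids=[] leaves the image list empty, and _split_generators substitutes the metadata.parquet path into data_dirs. A minimal usage sketch, using the standard datasets loading-script invocation with the config name from this diff:

    from datasets import load_dataset

    # Download only metadata.parquet and iterate over prompt/parameter records
    dataset = load_dataset("poloclub/diffusiondb", "text_only")
    print(dataset["train"][0]["prompt"])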