Stable-X committed
Commit 9dfa4de • 1 Parent(s): 0169285

Update code

.gitignore ADDED
@@ -0,0 +1,4 @@
+ .idea
+ .DS_Store
+ __pycache__
+ weights
README.md CHANGED
@@ -6,8 +6,8 @@ colorTo: yellow
  sdk: gradio
  sdk_version: 4.36.1
  app_file: app.py
- pinned: false
+ pinned: true
  license: apache-2.0
+ hf_oauth: true
+ hf_oauth_expiration_minutes: 43200
  ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
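
The two new `hf_oauth` keys turn on "Sign in with Hugging Face" for the Space, with tokens valid for 43200 minutes (30 days). A minimal sketch of how a Gradio app can read the resulting login state; the `greet` handler is illustrative, while `gr.LoginButton` and the injected `gr.OAuthProfile` argument are the standard Spaces OAuth hooks:

import gradio as gr

def greet(profile: gr.OAuthProfile | None) -> str:
    # Gradio injects the OAuth profile when the user is logged in;
    # the parameter must not be listed in `inputs`.
    if profile is None:
        return "Not logged in."
    return f"Logged in as {profile.username}"

with gr.Blocks() as demo:
    gr.LoginButton()
    out = gr.Textbox(label="Login status")
    demo.load(greet, inputs=None, outputs=out)

demo.launch()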
 
 
app.py ADDED
@@ -0,0 +1,389 @@
# Copyright 2024 Anton Obukhov, ETH Zurich. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# --------------------------------------------------------------------------
# If you find this code useful, we kindly ask you to cite our paper in your work.
# Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
# More information about the method can be found at https://marigoldmonodepth.github.io
# --------------------------------------------------------------------------

import functools
import os
import sys

import cv2
import gradio as gr
import numpy as np
import torch
from PIL import Image

import spaces

import diffusers

from stablenormal.pipeline_yoso_normal import YOSONormalsPipeline
from stablenormal.pipeline_stablenormal import StableNormalPipeline
from stablenormal.scheduler.heuristics_ddimsampler import HEURI_DDIMScheduler

from data_utils import HWC3, resize_image

sys.path.append('./geowizard')
from models.geowizard_pipeline import DepthNormalEstimationPipeline


class Geowizard(object):
    """Wrapper around the GeoWizard depth & normal estimation pipeline."""

    def __init__(self):
        self.model = DepthNormalEstimationPipeline.from_pretrained(
            "weights/Geowizard/", torch_dtype=torch.float16)

    def cuda(self):
        self.model.cuda()
        return self

    def cpu(self):
        self.model.cpu()
        return self

    def float(self):
        self.model.float()
        return self

    def to(self, device):
        self.model.to(device)
        return self

    def eval(self):
        self.model.eval()
        return self

    def train(self):
        self.model.train()
        return self

    @torch.no_grad()
    def __call__(self, img, image_resolution=768):
        # The pipeline expects an RGB PIL image; OpenCV loads BGR.
        pipe_out = self.model(
            Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)),
            denoising_steps=10,
            ensemble_size=1,
            processing_res=image_resolution,
            match_input_res=True,
            domain="indoor",
            color_map="Spectral",
            show_progress_bar=False,
        )
        # Map normals from [-1, 1] to an RGB image in [0, 255].
        pred_normal = pipe_out.normal_np
        pred_normal = (pred_normal + 1) / 2 * 255
        pred_normal = pred_normal.astype(np.uint8)
        return pred_normal

    def __repr__(self):
        return f"model: \n{self.model}"


class Marigold(Geowizard):
    """Wrapper around the Marigold normals pipeline."""

    def __init__(self):
        self.model = diffusers.MarigoldNormalsPipeline.from_pretrained(
            "weights/marigold-normals-v0-1", torch_dtype=torch.float16)

    @torch.no_grad()
    def __call__(self, img, image_resolution=768):
        pipe_out = self.model(Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)))
        pred_normal = pipe_out.prediction[0]
        # Flip the x axis to match the sign convention of the other models.
        pred_normal[..., 0] = -pred_normal[..., 0]
        pred_normal = (pred_normal + 1) / 2 * 255
        pred_normal = pred_normal.astype(np.uint8)
        return pred_normal

    def __repr__(self):
        return f"model: \n{self.model}"


class StableNormal(Geowizard):
    """Wrapper around the two-stage StableNormal pipeline."""

    def __init__(self):
        # Weights are expected under the local weights/ directory (see .gitignore).
        x_start_pipeline = YOSONormalsPipeline.from_pretrained(
            'weights/yoso-normal-v0-2', variant="fp16", torch_dtype=torch.float16)
        self.model = StableNormalPipeline.from_pretrained(
            'weights/stable-normal-v0-1', variant="fp16", torch_dtype=torch.float16,
            scheduler=HEURI_DDIMScheduler(prediction_type='sample',
                                          beta_start=0.00085, beta_end=0.0120,
                                          beta_schedule="scaled_linear"))
        # Two-stage concatenation: the YOSO pipeline provides the x_start estimate.
        self.model.x_start_pipeline = x_start_pipeline
        self.model.x_start_pipeline.to('cuda', torch.float16)
        self.model.prior.to('cuda', torch.float16)

    @torch.no_grad()
    def __call__(self, img, image_resolution=768):
        pipe_out = self.model(Image.fromarray(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)))
        pred_normal = pipe_out.prediction[0]
        pred_normal = (pred_normal + 1) / 2 * 255
        pred_normal = pred_normal.astype(np.uint8)
        return pred_normal

    def to(self, device):
        self.model.to(device, torch.float16)
        return self

    def __repr__(self):
        return f"model: \n{self.model}"


class DSINE(object):
    """Wrapper around the feed-forward DSINE normal estimator."""

    def __init__(self):
        self.model = torch.hub.load("hugoycj/DSINE-hub", "DSINE",
                                    local_file_path='./models/dsine.pt', trust_repo=True)

    def cuda(self):
        self.model.cuda()
        return self

    def float(self):
        self.model.float()
        return self

    def to(self, device):
        self.model.to(device)
        return self

    def eval(self):
        self.model.eval()
        return self

    def train(self):
        self.model.train()
        return self

    @torch.no_grad()
    def __call__(self, img, image_resolution=768):
        pred_normal = self.model.infer_cv2(img)[0]  # (3, H, W)
        pred_normal = (pred_normal + 1) / 2 * 255
        pred_normal = pred_normal.cpu().numpy().transpose(1, 2, 0)  # HWC, RGB
        pred_normal = pred_normal.astype(np.uint8)
        return pred_normal

    def __repr__(self):
        return f"model: \n{self.model}"


def process(
    pipe_list,
    path_input,
):
    # pipe_list order: DSINE, Marigold, GeoWizard, StableNormal
    path_out_vis_list = []
    for pipe in pipe_list:
        # Move the current pipeline to the GPU for inference, then back to CPU.
        try:
            pipe.to('cuda')
        except Exception:
            pass

        img = cv2.imread(path_input)
        raw_input_image = HWC3(img)
        ori_H, ori_W, _ = raw_input_image.shape

        img = resize_image(raw_input_image, 768)

        pipe_out = pipe(
            img,
            768,
        )
        pred_normal = cv2.resize(pipe_out, (ori_W, ori_H))
        path_out_vis_list.append(Image.fromarray(pred_normal))

        try:
            pipe.to('cpu')
        except Exception:
            pass

    # Pad the outputs so all four image slots receive a value.
    _output = path_out_vis_list + [None] * (4 - len(path_out_vis_list))
    yield _output


def run_demo_server(pipe_list):
    process_pipe = spaces.GPU(functools.partial(process, pipe_list), duration=120)
    os.environ["GRADIO_ALLOW_FLAGGING"] = "never"

    with gr.Blocks(
        analytics_enabled=False,
        title="Normal Estimation Comparison",
        css="""
            #download {
                height: 118px;
            }
            .slider .inner {
                width: 5px;
                background: #FFF;
            }
            .viewport {
                aspect-ratio: 4/3;
            }
            h1 {
                text-align: center;
                display: block;
            }
            h2 {
                text-align: center;
                display: block;
            }
            h3 {
                text-align: center;
                display: block;
            }
        """,
    ) as demo:
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(
                    label="Input Image",
                    type="filepath",
                    height=256,
                )
            with gr.Column():
                submit_btn = gr.Button(value="Compute normal", variant="primary")
                clear_btn = gr.Button(value="Clear")
        with gr.Row():
            with gr.Column():
                DSINE_output_slider = gr.Image(
                    label="DSINE",
                    type="filepath",
                )
            with gr.Column():
                marigold_output_slider = gr.Image(
                    label="Marigold",
                    type="filepath",
                )
            with gr.Column():
                geowizard_output_slider = gr.Image(
                    label="GeoWizard",
                    type="filepath",
                )
            with gr.Column():
                Ours_slider = gr.Image(
                    label="StableNormal",
                    type="filepath",
                )

        outputs = [
            DSINE_output_slider,
            marigold_output_slider,
            geowizard_output_slider,
            Ours_slider,
        ]

        submit_btn.click(
            fn=process_pipe,
            inputs=input_image,
            outputs=outputs,
            concurrency_limit=1,
        )

        gr.Examples(
            fn=process_pipe,
            examples=sorted([
                os.path.join("files", "images", name)
                for name in os.listdir(os.path.join("files", "images"))
            ]),
            inputs=input_image,
            outputs=outputs,
            cache_examples=False,
        )

        def clear_fn():
            # Reset the submit button, the input image, and the four output images,
            # matching the six components listed in the click() outputs below.
            return [
                gr.Button(interactive=True),
                gr.Image(value=None, interactive=True),
                None,
                None,
                None,
                None,
            ]

        clear_btn.click(
            fn=clear_fn,
            inputs=[],
            outputs=[
                submit_btn,
                input_image,
                marigold_output_slider,
                geowizard_output_slider,
                DSINE_output_slider,
                Ours_slider,
            ],
        )

    demo.queue(
        api_open=False,
    ).launch(
        server_name="0.0.0.0",
        server_port=7860,
        share=False,
    )


def main():
    marigold_pipe = Marigold()
    geowizard_pipe = Geowizard()
    dsine_pipe = DSINE()
    our_pipe = StableNormal()

    run_demo_server([dsine_pipe, marigold_pipe, geowizard_pipe, our_pipe])


if __name__ == "__main__":
    main()
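
Outside the Space, the model wrappers in app.py can be driven directly. A minimal sketch, assuming the corresponding checkpoints already sit under weights/ and a GPU is available; the input path is hypothetical:

import cv2
from app import StableNormal

pipe = StableNormal()                          # all four wrappers share this interface
img = cv2.imread("files/images/example.png")   # BGR, as the wrappers expect
normal = pipe(img, image_resolution=768)       # uint8 HWC normal map in [0, 255]
cv2.imwrite("normal.png", cv2.cvtColor(normal, cv2.COLOR_RGB2BGR))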
data_utils.py ADDED
@@ -0,0 +1,81 @@
import numpy as np
import torch
import cv2


def HWC3(x):
    """Convert a uint8 image to 3-channel HWC, compositing alpha over white."""
    assert x.dtype == np.uint8
    if x.ndim == 2:
        x = x[:, :, None]
    assert x.ndim == 3
    H, W, C = x.shape
    assert C == 1 or C == 3 or C == 4
    if C == 3:
        return x
    if C == 1:
        return np.concatenate([x, x, x], axis=2)
    if C == 4:
        color = x[:, :, 0:3].astype(np.float32)
        alpha = x[:, :, 3:4].astype(np.float32) / 255.0
        y = color * alpha + 255.0 * (1.0 - alpha)
        y = y.clip(0, 255).astype(np.uint8)
        return y


def resize_image(input_image, resolution):
    """Resize so the short side is about `resolution`, snapped to multiples of 64."""
    H, W, C = input_image.shape
    H = float(H)
    W = float(W)
    k = float(resolution) / min(H, W)
    H *= k
    W *= k
    H = int(np.round(H / 64.0)) * 64
    W = int(np.round(W / 64.0)) * 64
    img = cv2.resize(input_image, (W, H),
                     interpolation=cv2.INTER_LANCZOS4 if k > 1 else cv2.INTER_AREA)
    return img


def norm_normalize(norm_out):
    # Normalize a (3, H, W) normal map to unit length; pixels whose original
    # magnitude is far from 1 are masked out and set to -1.
    norm_x, norm_y, norm_z = torch.split(norm_out, 1, dim=0)
    norm = torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10
    final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm], dim=0)
    fg_mask = torch.ones_like(norm).repeat(3, 1, 1)
    fg_mask[norm.repeat(3, 1, 1) < 0.5] = 0.
    fg_mask[norm.repeat(3, 1, 1) > 1.5] = 0.

    final_out[norm.repeat(3, 1, 1) < 0.5] = -1
    final_out[norm.repeat(3, 1, 1) > 1.5] = -1
    return final_out, fg_mask.bool()


def center_crop(input_image):
    """Crop the largest centered square from the image."""
    height, width = input_image.shape[:2]
    min_dim = min(height, width)

    center_x = width // 2
    center_y = height // 2
    half_length = min_dim // 2

    crop_x1 = center_x - half_length
    crop_x2 = center_x + half_length
    crop_y1 = center_y - half_length
    crop_y2 = center_y + half_length

    return input_image[crop_y1:crop_y2, crop_x1:crop_x2]


def flip_x(normal):
    """Negate the x component of a normal map (numpy array or torch tensor)."""
    if isinstance(normal, np.ndarray):
        return normal.dot(np.array([[-1, 0, 0], [0, 1, 0], [0, 0, 1]])).astype(np.float32)
    trans = torch.tensor([[-1, 0, 0], [0, 1, 0], [0, 0, 1]]).float()
    return normal @ trans
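
As a worked example of the 64-pixel snapping in resize_image: a 333x500 input at resolution 768 gives k = 768/333 ≈ 2.306, i.e. 768x1153.2 after scaling, which rounds to 768x1152. A quick self-check, a sketch assuming data_utils is importable:

import numpy as np
from data_utils import resize_image

img = np.zeros((333, 500, 3), dtype=np.uint8)
assert resize_image(img, 768).shape == (768, 1152, 3)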
geowizard/models/attention.py ADDED
@@ -0,0 +1,777 @@
# Copyright 2023 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.


# Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu


from typing import Any, Dict, Optional

import torch
import torch.nn.functional as F
from torch import nn
import xformers

from diffusers.utils import USE_PEFT_BACKEND
from diffusers.utils.torch_utils import maybe_allow_in_graph
from diffusers.models.activations import GEGLU, GELU, ApproximateGELU
from diffusers.models.attention_processor import Attention
from diffusers.models.embeddings import SinusoidalPositionalEmbedding
from diffusers.models.lora import LoRACompatibleLinear
from diffusers.models.normalization import AdaLayerNorm, AdaLayerNormContinuous, AdaLayerNormZero, RMSNorm


def _chunked_feed_forward(
    ff: nn.Module, hidden_states: torch.Tensor, chunk_dim: int, chunk_size: int, lora_scale: Optional[float] = None
):
    # "feed_forward_chunk_size" can be used to save memory
    if hidden_states.shape[chunk_dim] % chunk_size != 0:
        raise ValueError(
            f"`hidden_states` dimension to be chunked: {hidden_states.shape[chunk_dim]} has to be divisible by chunk size: {chunk_size}. Make sure to set an appropriate `chunk_size` when calling `unet.enable_forward_chunking`."
        )

    num_chunks = hidden_states.shape[chunk_dim] // chunk_size
    if lora_scale is None:
        ff_output = torch.cat(
            [ff(hid_slice) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
            dim=chunk_dim,
        )
    else:
        # TODO(Patrick): LoRA scale can be removed once PEFT refactor is complete
        ff_output = torch.cat(
            [ff(hid_slice, scale=lora_scale) for hid_slice in hidden_states.chunk(num_chunks, dim=chunk_dim)],
            dim=chunk_dim,
        )

    return ff_output


@maybe_allow_in_graph
class GatedSelfAttentionDense(nn.Module):
    r"""
    A gated self-attention dense layer that combines visual features and object features.

    Parameters:
        query_dim (`int`): The number of channels in the query.
        context_dim (`int`): The number of channels in the context.
        n_heads (`int`): The number of heads to use for attention.
        d_head (`int`): The number of channels in each head.
    """

    def __init__(self, query_dim: int, context_dim: int, n_heads: int, d_head: int):
        super().__init__()

        # we need a linear projection since we need to concatenate visual and object features
        self.linear = nn.Linear(context_dim, query_dim)

        self.attn = Attention(query_dim=query_dim, heads=n_heads, dim_head=d_head)
        self.ff = FeedForward(query_dim, activation_fn="geglu")

        self.norm1 = nn.LayerNorm(query_dim)
        self.norm2 = nn.LayerNorm(query_dim)

        self.register_parameter("alpha_attn", nn.Parameter(torch.tensor(0.0)))
        self.register_parameter("alpha_dense", nn.Parameter(torch.tensor(0.0)))

        self.enabled = True

    def forward(self, x: torch.Tensor, objs: torch.Tensor) -> torch.Tensor:
        if not self.enabled:
            return x

        n_visual = x.shape[1]
        objs = self.linear(objs)

        x = x + self.alpha_attn.tanh() * self.attn(self.norm1(torch.cat([x, objs], dim=1)))[:, :n_visual, :]
        x = x + self.alpha_dense.tanh() * self.ff(self.norm2(x))

        return x


@maybe_allow_in_graph
class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"` or `"ada_norm_zero"`.
        final_dropout (`bool` *optional*, defaults to False):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"` or `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply to.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",  # 'layer_norm', 'ada_norm', 'ada_norm_zero', 'ada_norm_single'
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
        ada_norm_continous_conditioning_embedding_dim: Optional[int] = None,
        ada_norm_bias: Optional[int] = None,
        ff_inner_dim: Optional[int] = None,
        ff_bias: bool = True,
        attention_out_bias: bool = True,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

        self.use_ada_layer_norm_zero = (num_embeds_ada_norm is not None) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (num_embeds_ada_norm is not None) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"
        self.use_ada_layer_norm_continuous = norm_type == "ada_norm_continuous"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embedding` type is defined, `num_positional_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(dim, max_seq_length=num_positional_embeddings)
        else:
            self.pos_embed = None

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_continuous:
            self.norm1 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "rms_norm",
            )
        else:
            self.norm1 = nn.LayerNorm(dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps)

        # Self-attention is replaced by the cross-task joint attention.
        self.attn1 = CustomJointAttention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
            out_bias=attention_out_bias,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None or double_self_attention:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            if self.use_ada_layer_norm:
                self.norm2 = AdaLayerNorm(dim, num_embeds_ada_norm)
            elif self.use_ada_layer_norm_continuous:
                self.norm2 = AdaLayerNormContinuous(
                    dim,
                    ada_norm_continous_conditioning_embedding_dim,
                    norm_elementwise_affine,
                    norm_eps,
                    ada_norm_bias,
                    "rms_norm",
                )
            else:
                self.norm2 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)

            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim if not double_self_attention else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
                out_bias=attention_out_bias,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        if self.use_ada_layer_norm_continuous:
            self.norm3 = AdaLayerNormContinuous(
                dim,
                ada_norm_continous_conditioning_embedding_dim,
                norm_elementwise_affine,
                norm_eps,
                ada_norm_bias,
                "layer_norm",
            )
        elif not self.use_ada_layer_norm_single:
            self.norm3 = nn.LayerNorm(dim, norm_eps, norm_elementwise_affine)

        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn=activation_fn,
            final_dropout=final_dropout,
            inner_dim=ff_inner_dim,
            bias=ff_bias,
        )

        # 4. Fuser
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(dim, cross_attention_dim, num_attention_heads, attention_head_dim)

        # 5. Scale-shift for PixArt-Alpha.
        if self.use_ada_layer_norm_single:
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = 0

    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Dict[str, Any] = None,
        class_labels: Optional[torch.LongTensor] = None,
        added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
    ) -> torch.FloatTensor:
        # Notice that normalization is always applied before the real computation in the following blocks.

        # 0. Self-Attention
        batch_size = hidden_states.shape[0]

        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.use_layer_norm:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.use_ada_layer_norm_continuous:
            norm_hidden_states = self.norm1(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif self.use_ada_layer_norm_single:
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

        # 1. Retrieve lora scale.
        lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0

        # 2. Prepare GLIGEN inputs
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states if self.only_cross_attention else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.use_ada_layer_norm_single:
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        # 2.5 GLIGEN Control
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

        # 3. Cross-Attention
        if self.attn2 is not None:
            if self.use_ada_layer_norm:
                norm_hidden_states = self.norm2(hidden_states, timestep)
            elif self.use_ada_layer_norm_zero or self.use_layer_norm:
                norm_hidden_states = self.norm2(hidden_states)
            elif self.use_ada_layer_norm_single:
                # For PixArt norm2 isn't applied here:
                # https://github.com/PixArt-alpha/PixArt-alpha/blob/0f55e922376d8b797edd44d25d0e7464b260dcab/diffusion/model/nets/PixArtMS.py#L70C1-L76C103
                norm_hidden_states = hidden_states
            elif self.use_ada_layer_norm_continuous:
                norm_hidden_states = self.norm2(hidden_states, added_cond_kwargs["pooled_text_emb"])
            else:
                raise ValueError("Incorrect norm")

            if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
                norm_hidden_states = self.pos_embed(norm_hidden_states)

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        if self.use_ada_layer_norm_continuous:
            norm_hidden_states = self.norm3(hidden_states, added_cond_kwargs["pooled_text_emb"])
        elif not self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]

        if self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm2(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        if self._chunk_size is not None:
            # "feed_forward_chunk_size" can be used to save memory
            ff_output = _chunked_feed_forward(
                self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size, lora_scale=lora_scale
            )
        else:
            ff_output = self.ff(norm_hidden_states, scale=lora_scale)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.use_ada_layer_norm_single:
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states


class CustomJointAttention(Attention):
    def set_use_memory_efficient_attention_xformers(
        self, use_memory_efficient_attention_xformers: bool, *args, **kwargs
    ):
        # Always route this attention through the xformers joint-attention processor.
        processor = XFormersJointAttnProcessor()
        self.set_processor(processor)


class XFormersJointAttnProcessor:
    r"""
    Joint-task attention processor built on xformers memory-efficient attention.
    """

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
        temb=None,
        num_tasks=2,
    ):
        residual = hidden_states

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        # from yuancheng; here attention_mask is None
        if attention_mask is not None:
            # expand our mask's singleton query_tokens dimension:
            #   [batch*heads,            1, key_tokens] ->
            #   [batch*heads, query_tokens, key_tokens]
            # so that it can be added as a bias onto the attention scores that xformers computes:
            #   [batch*heads, query_tokens, key_tokens]
            # we do this explicitly because xformers doesn't broadcast the singleton dimension for us.
            _, query_tokens, _ = hidden_states.shape
            attention_mask = attention_mask.expand(-1, query_tokens, -1)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        assert num_tasks == 2  # only support two tasks now

        key_0, key_1 = torch.chunk(key, dim=0, chunks=2)  # keys shape (b t) d c
        value_0, value_1 = torch.chunk(value, dim=0, chunks=2)

        key = torch.cat([key_0, key_1], dim=1)  # (b t) 2d c
        value = torch.cat([value_0, value_1], dim=1)  # (b t) 2d c
        key = torch.cat([key] * 2, dim=0)  # (2 b t) 2d c
        value = torch.cat([value] * 2, dim=0)  # (2 b t) 2d c

        query = attn.head_to_batch_dim(query).contiguous()
        key = attn.head_to_batch_dim(key).contiguous()
        value = attn.head_to_batch_dim(value).contiguous()

        hidden_states = xformers.ops.memory_efficient_attention(query, key, value, attn_bias=attention_mask)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


@maybe_allow_in_graph
class TemporalBasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block for video-like data.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        time_mix_inner_dim (`int`): The number of channels for temporal attention.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
    """

    def __init__(
        self,
        dim: int,
        time_mix_inner_dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        cross_attention_dim: Optional[int] = None,
    ):
        super().__init__()
        self.is_res = dim == time_mix_inner_dim

        # Define 3 blocks. Each block has its own normalization layer.
        # 1. Self-Attn
        self.norm_in = nn.LayerNorm(dim)
        self.ff_in = FeedForward(
            dim,
            dim_out=time_mix_inner_dim,
            activation_fn="geglu",
        )

        self.norm1 = nn.LayerNorm(time_mix_inner_dim)
        self.attn1 = Attention(
            query_dim=time_mix_inner_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            cross_attention_dim=None,
        )

        # 2. Cross-Attn
        if cross_attention_dim is not None:
            # We currently only use AdaLayerNormZero for self attention where there will only be one attention block.
            # I.e. the number of returned modulation chunks from AdaLayerZero would not make sense if returned during
            # the second cross attention block.
            self.norm2 = nn.LayerNorm(time_mix_inner_dim)
            self.attn2 = Attention(
                query_dim=time_mix_inner_dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
            )  # is self-attn if encoder_hidden_states is none
        else:
            self.norm2 = None
            self.attn2 = None

        # 3. Feed-forward
        self.norm3 = nn.LayerNorm(time_mix_inner_dim)
        self.ff = FeedForward(time_mix_inner_dim, activation_fn="geglu")

        # let chunk size default to None
        self._chunk_size = None
        self._chunk_dim = None

    def set_chunk_feed_forward(self, chunk_size: Optional[int], **kwargs):
        # Sets chunk feed-forward
        self._chunk_size = chunk_size
        # chunk dim should be hardcoded to 1 to have better speed vs. memory trade-off
        self._chunk_dim = 1

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        num_frames: int,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
    ) -> torch.FloatTensor:
        # Notice that normalization is always applied before the real computation in the following blocks.
        # 0. Self-Attention
        batch_frames, seq_length, channels = hidden_states.shape
        batch_size = batch_frames // num_frames

        hidden_states = hidden_states[None, :].reshape(batch_size, num_frames, seq_length, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * seq_length, num_frames, channels)

        residual = hidden_states
        hidden_states = self.norm_in(hidden_states)

        if self._chunk_size is not None:
            hidden_states = _chunked_feed_forward(self.ff_in, hidden_states, self._chunk_dim, self._chunk_size)
        else:
            hidden_states = self.ff_in(hidden_states)

        if self.is_res:
            hidden_states = hidden_states + residual

        norm_hidden_states = self.norm1(hidden_states)
        attn_output = self.attn1(norm_hidden_states, encoder_hidden_states=None)
        hidden_states = attn_output + hidden_states

        # 3. Cross-Attention
        if self.attn2 is not None:
            norm_hidden_states = self.norm2(hidden_states)
            attn_output = self.attn2(norm_hidden_states, encoder_hidden_states=encoder_hidden_states)
            hidden_states = attn_output + hidden_states

        # 4. Feed-forward
        norm_hidden_states = self.norm3(hidden_states)

        if self._chunk_size is not None:
            ff_output = _chunked_feed_forward(self.ff, norm_hidden_states, self._chunk_dim, self._chunk_size)
        else:
            ff_output = self.ff(norm_hidden_states)

        if self.is_res:
            hidden_states = ff_output + hidden_states
        else:
            hidden_states = ff_output

        hidden_states = hidden_states[None, :].reshape(batch_size, seq_length, num_frames, channels)
        hidden_states = hidden_states.permute(0, 2, 1, 3)
        hidden_states = hidden_states.reshape(batch_size * num_frames, seq_length, channels)

        return hidden_states


class SkipFFTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        kv_input_dim: int,
        kv_input_dim_proj_use_bias: bool,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        attention_bias: bool = False,
        attention_out_bias: bool = True,
    ):
        super().__init__()
        if kv_input_dim != dim:
            self.kv_mapper = nn.Linear(kv_input_dim, dim, kv_input_dim_proj_use_bias)
        else:
            self.kv_mapper = None

        self.norm1 = RMSNorm(dim, 1e-06)

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim,
            out_bias=attention_out_bias,
        )

        self.norm2 = RMSNorm(dim, 1e-06)

        self.attn2 = Attention(
            query_dim=dim,
            cross_attention_dim=cross_attention_dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            out_bias=attention_out_bias,
        )

    def forward(self, hidden_states, encoder_hidden_states, cross_attention_kwargs):
        cross_attention_kwargs = cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}

        if self.kv_mapper is not None:
            encoder_hidden_states = self.kv_mapper(F.silu(encoder_hidden_states))

        norm_hidden_states = self.norm1(hidden_states)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            **cross_attention_kwargs,
        )

        hidden_states = attn_output + hidden_states

        norm_hidden_states = self.norm2(hidden_states)

        attn_output = self.attn2(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states,
            **cross_attention_kwargs,
        )

        hidden_states = attn_output + hidden_states

        return hidden_states


class FeedForward(nn.Module):
    r"""
    A feed-forward layer.

    Parameters:
        dim (`int`): The number of channels in the input.
        dim_out (`int`, *optional*): The number of channels in the output. If not given, defaults to `dim`.
        mult (`int`, *optional*, defaults to 4): The multiplier to use for the hidden dimension.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        final_dropout (`bool` *optional*, defaults to False): Apply a final dropout.
        bias (`bool`, defaults to True): Whether to use a bias in the linear layer.
    """

    def __init__(
        self,
        dim: int,
        dim_out: Optional[int] = None,
        mult: int = 4,
        dropout: float = 0.0,
        activation_fn: str = "geglu",
        final_dropout: bool = False,
        inner_dim=None,
        bias: bool = True,
    ):
        super().__init__()
        if inner_dim is None:
            inner_dim = int(dim * mult)
        dim_out = dim_out if dim_out is not None else dim
        linear_cls = LoRACompatibleLinear if not USE_PEFT_BACKEND else nn.Linear

        if activation_fn == "gelu":
            act_fn = GELU(dim, inner_dim, bias=bias)
        elif activation_fn == "gelu-approximate":
            act_fn = GELU(dim, inner_dim, approximate="tanh", bias=bias)
        elif activation_fn == "geglu":
            act_fn = GEGLU(dim, inner_dim, bias=bias)
        elif activation_fn == "geglu-approximate":
            act_fn = ApproximateGELU(dim, inner_dim, bias=bias)

        self.net = nn.ModuleList([])
        # project in
        self.net.append(act_fn)
        # project dropout
        self.net.append(nn.Dropout(dropout))
        # project out
        self.net.append(linear_cls(inner_dim, dim_out, bias=bias))
        # FF as used in Vision Transformer, MLP-Mixer, etc. have a final dropout
        if final_dropout:
            self.net.append(nn.Dropout(dropout))

    def forward(self, hidden_states: torch.Tensor, scale: float = 1.0) -> torch.Tensor:
        compatible_cls = (GEGLU,) if USE_PEFT_BACKEND else (GEGLU, LoRACompatibleLinear)
        for module in self.net:
            if isinstance(module, compatible_cls):
                hidden_states = module(hidden_states, scale)
            else:
                hidden_states = module(hidden_states)
        return hidden_states
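
The joint-attention processor above assumes the batch stacks two tasks (e.g. a depth half and a normal half). It chunks the keys and values per task, concatenates them along the sequence axis, and duplicates the result so every query attends across both tasks. A minimal shape-level sketch of that rearrangement, standalone and with made-up sizes:

import torch

b, t, c = 2, 64, 8               # per-task batch, tokens, channels
key = torch.randn(2 * b, t, c)   # two tasks stacked along the batch axis

key_0, key_1 = torch.chunk(key, chunks=2, dim=0)  # (b, t, c) each
joint = torch.cat([key_0, key_1], dim=1)          # (b, 2t, c): both tasks' tokens
joint = torch.cat([joint] * 2, dim=0)             # (2b, 2t, c): reused for both halves

assert joint.shape == (2 * b, 2 * t, c)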
geowizard/models/geowizard_object_pipeline.py ADDED
@@ -0,0 +1,341 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Adapted from Marigold :https://github.com/prs-eth/Marigold
2
+
3
+ from typing import Any, Dict, Union
4
+
5
+ import torch
6
+ from torch.utils.data import DataLoader, TensorDataset
7
+ import numpy as np
8
+ from tqdm.auto import tqdm
9
+ from PIL import Image
10
+ from diffusers import (
11
+ DiffusionPipeline,
12
+ DDIMScheduler,
13
+ AutoencoderKL,
14
+ )
15
+ from models.unet_2d_condition import UNet2DConditionModel
16
+ from diffusers.utils import BaseOutput
17
+ from transformers import CLIPTextModel, CLIPTokenizer
18
+
19
+ from utils.image_util import resize_max_res,chw2hwc,colorize_depth_maps
20
+ from utils.depth_ensemble import ensemble_depths
21
+ from utils.normal_ensemble import ensemble_normals
22
+ from utils.batch_size import find_batch_size
23
+ import cv2
24
+
25
+ class DepthNormalPipelineOutput(BaseOutput):
26
+ """
27
+ Output class for GeoWizard monocular depth & normal prediction pipeline.
28
+ Args:
29
+ depth_np (`np.ndarray`):
30
+ Predicted depth map, with depth values in the range of [0, 1].
31
+ depth_colored (`PIL.Image.Image`):
32
+ Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
33
+ normal_np (`np.ndarray`):
34
+ Predicted normal map, with depth values in the range of [0, 1].
35
+ normal_colored (`PIL.Image.Image`):
36
+ Colorized normal map, with the shape of [3, H, W] and values in [0, 1].
37
+ uncertainty (`None` or `np.ndarray`):
38
+ Uncalibrated uncertainty(MAD, median absolute deviation) coming from ensembling.
39
+ """
40
+ depth_np: np.ndarray
41
+ depth_colored: Image.Image
42
+ normal_np: np.ndarray
43
+ normal_colored: Image.Image
44
+ uncertainty: Union[None, np.ndarray]
45
+
46
+ class DepthNormalEstimationPipeline(DiffusionPipeline):
47
+ # two hyper-parameters
48
+ latent_scale_factor = 0.18215
49
+
50
+ def __init__(self,
51
+ unet:UNet2DConditionModel,
52
+ vae:AutoencoderKL,
53
+ scheduler:DDIMScheduler,
54
+ text_encoder:CLIPTextModel,
55
+ tokenizer:CLIPTokenizer,
56
+ ):
57
+ super().__init__()
58
+
59
+ self.register_modules(
60
+ unet=unet,
61
+ vae=vae,
62
+ scheduler=scheduler,
63
+ text_encoder=text_encoder,
64
+ tokenizer=tokenizer,
65
+ )
66
+ self.empty_text_embed = None
67
+
68
+ @torch.no_grad()
69
+ def __call__(self,
70
+ input_image:Image,
71
+ denoising_steps: int = 10,
72
+ ensemble_size: int = 10,
73
+ processing_res: int = 768,
74
+ match_input_res:bool =True,
75
+ batch_size:int = 0,
76
+ domain: str = "indoor",
77
+ color_map: str="Spectral",
78
+ show_progress_bar:bool = True,
79
+ ensemble_kwargs: Dict = None,
80
+ ) -> DepthNormalPipelineOutput:
81
+
82
+ # inherit from thea Diffusion Pipeline
83
+ device = self.device
84
+ input_size = input_image.size
85
+
86
+ # adjust the input resolution.
87
+ if not match_input_res:
88
+ assert (
89
+ processing_res is not None
90
+ )," Value Error: `resize_output_back` is only valid with "
91
+
92
+ assert processing_res >=0
93
+ assert denoising_steps >=1
94
+ assert ensemble_size >=1
95
+
96
+ # --------------- Image Processing ------------------------
97
+ # Resize image
98
+ if processing_res >0:
99
+ input_image = resize_max_res(
100
+ input_image, max_edge_resolution=processing_res
101
+ )
102
+
103
+ # Convert the image to RGB, to 1. reomve the alpha channel.
104
+ input_image = input_image.convert("RGB")
105
+ image = np.array(input_image)
106
+
107
+ # Normalize RGB Values.
108
+ rgb = np.transpose(image,(2,0,1))
109
+ rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
110
+ rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
111
+ rgb_norm = rgb_norm.to(device)
112
+
113
+ assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
114
+
115
+ # ----------------- predicting depth -----------------
116
+ duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
117
+ single_rgb_dataset = TensorDataset(duplicated_rgb)
118
+
119
+ # find the batch size
120
+ if batch_size>0:
121
+ _bs = batch_size
122
+ else:
123
+ _bs = 1
124
+
125
+ single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
126
+
127
+ # predicted the depth
128
+ depth_pred_ls = []
129
+ normal_pred_ls = []
130
+
131
+ if show_progress_bar:
132
+ iterable_bar = tqdm(
133
+ single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False
134
+ )
135
+ else:
136
+ iterable_bar = single_rgb_loader
137
+
138
+ for batch in iterable_bar:
139
+ (batched_image, )= batch # here the image is still around 0-1
140
+
141
+ depth_pred_raw, normal_pred_raw = self.single_infer(
142
+ input_rgb=batched_image,
143
+ num_inference_steps=denoising_steps,
144
+ domain=domain,
145
+ show_pbar=show_progress_bar,
146
+ )
147
+ depth_pred_ls.append(depth_pred_raw.detach().clone())
148
+ normal_pred_ls.append(normal_pred_raw.detach().clone())
149
+
150
+ depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze() #(10,224,768)
151
+ normal_preds = torch.concat(normal_pred_ls, axis=0).squeeze()
152
+ torch.cuda.empty_cache() # clear vram cache for ensembling
153
+
154
+ # ----------------- Test-time ensembling -----------------
155
+ if ensemble_size > 1:
156
+ depth_pred, pred_uncert = ensemble_depths(
157
+ depth_preds, **(ensemble_kwargs or {})
158
+ )
159
+ normal_pred = ensemble_normals(normal_preds)
160
+ else:
161
+ depth_pred = depth_preds
162
+ normal_pred = normal_preds
163
+ pred_uncert = None
164
+
165
+ # ----------------- Post processing -----------------
166
+ # Scale prediction to [0, 1]
167
+ min_d = torch.min(depth_pred)
168
+ max_d = torch.max(depth_pred)
169
+ depth_pred = (depth_pred - min_d) / (max_d - min_d)
170
+
171
+ # Convert to numpy
172
+ depth_pred = depth_pred.cpu().numpy().astype(np.float32)
173
+ normal_pred = normal_pred.cpu().numpy().astype(np.float32)
174
+
175
+ # Resize back to original resolution
176
+ if match_input_res:
177
+ pred_img = Image.fromarray(depth_pred)
178
+ pred_img = pred_img.resize(input_size)
179
+ depth_pred = np.asarray(pred_img)
180
+ normal_pred = cv2.resize(chw2hwc(normal_pred), input_size, interpolation = cv2.INTER_NEAREST)
181
+
182
+ # Clip output range: current size is the original size
183
+ depth_pred = depth_pred.clip(0, 1)
184
+ normal_pred = normal_pred.clip(-1, 1)
185
+
186
+ # Colorize
187
+ depth_colored = colorize_depth_maps(
188
+ depth_pred, 0, 1, cmap=color_map
189
+ ).squeeze() # [3, H, W], value in (0, 1)
190
+ depth_colored = (depth_colored * 255).astype(np.uint8)
191
+ depth_colored_hwc = chw2hwc(depth_colored)
192
+ depth_colored_img = Image.fromarray(depth_colored_hwc)
193
+
194
+ normal_colored = ((normal_pred + 1)/2 * 255).astype(np.uint8)
195
+ normal_colored_img = Image.fromarray(normal_colored)
196
+
197
+ return DepthNormalPipelineOutput(
198
+ depth_np = depth_pred,
199
+ depth_colored = depth_colored_img,
200
+ normal_np = normal_pred,
201
+ normal_colored = normal_colored_img,
202
+ uncertainty=pred_uncert,
203
+ )
204
+
205
+ def __encode_text(self, prompt):
206
+ text_inputs = self.tokenizer(
207
+ prompt,
208
+ padding="do_not_pad",
209
+ max_length=self.tokenizer.model_max_length,
210
+ truncation=True,
211
+ return_tensors="pt",
212
+ )
213
+ text_input_ids = text_inputs.input_ids.to(self.text_encoder.device) #[1,2]
214
+ # print(text_input_ids.shape)
215
+ text_embed = self.text_encoder(text_input_ids)[0].to(self.dtype) #[1,2,1024]
216
+ return text_embed
217
+
218
+ @torch.no_grad()
219
+ def single_infer(self,input_rgb:torch.Tensor,
220
+ num_inference_steps:int,
221
+ domain:str,
222
+ show_pbar:bool,):
223
+
224
+ device = input_rgb.device
225
+
226
+ # Set timesteps: inherit from the diffuison pipeline
227
+ self.scheduler.set_timesteps(num_inference_steps, device=device) # here the numbers of the steps is only 10.
228
+ timesteps = self.scheduler.timesteps # [T]
229
+
230
+ # encode image
231
+ rgb_latent = self.encode_RGB(input_rgb)
232
+
233
+ # Initial geometric maps (Guassian noise)
234
+ geo_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype).repeat(2,1,1,1)
235
+ rgb_latent = rgb_latent.repeat(2,1,1,1)
236
+
237
+ # hybrid switcher
238
+ geo_class = torch.tensor([[0., 1.], [1, 0]], device=device, dtype=self.dtype)
239
+ geo_embedding = torch.cat([torch.sin(geo_class), torch.cos(geo_class)], dim=-1)
240
+
241
+ if domain == "indoor":
242
+ batch_text_embeds = self.__encode_text('indoor geometry').repeat((rgb_latent.shape[0],1,1))
243
+ elif domain == "outdoor":
244
+ batch_text_embeds = self.__encode_text('outdoor geometry').repeat((rgb_latent.shape[0],1,1))
245
+ elif domain == "object":
246
+ batch_text_embeds = self.__encode_text('object geometry').repeat((rgb_latent.shape[0],1,1))
247
+
248
+ class_embedding = geo_embedding
249
+
250
+ # Denoising loop
251
+ if show_pbar:
252
+ iterable = tqdm(
253
+ enumerate(timesteps),
254
+ total=len(timesteps),
255
+ leave=False,
256
+ desc=" " * 4 + "Diffusion denoising",
257
+ )
258
+ else:
259
+ iterable = enumerate(timesteps)
260
+
261
+ for i, t in iterable:
262
+ unet_input = torch.cat([rgb_latent, geo_latent], dim=1)
263
+
264
+ # predict the noise residual
265
+ noise_pred = self.unet(
266
+ unet_input, t.repeat(2), encoder_hidden_states=batch_text_embeds, class_labels=class_embedding
267
+ ).sample # [B, 4, h, w]
268
+
269
+ # compute the previous noisy sample x_t -> x_t-1
270
+ geo_latent = self.scheduler.step(noise_pred, t, geo_latent).prev_sample
271
+
272
+ geo_latent = geo_latent
273
+ torch.cuda.empty_cache()
274
+
275
+ depth = self.decode_depth(geo_latent[0][None])
276
+ depth = torch.clip(depth, -1.0, 1.0)
277
+ depth = (depth + 1.0) / 2.0
278
+
279
+ normal = self.decode_normal(geo_latent[1][None])
280
+ normal /= (torch.norm(normal, p=2, dim=1, keepdim=True)+1e-5)
281
+ normal *= -1.
282
+
283
+ return depth, normal
284
+
285
+
286
+ def encode_RGB(self, rgb_in: torch.Tensor) -> torch.Tensor:
287
+ """
288
+ Encode RGB image into latent.
289
+ Args:
290
+ rgb_in (`torch.Tensor`):
291
+ Input RGB image to be encoded.
292
+ Returns:
293
+ `torch.Tensor`: Image latent.
294
+ """
295
+
296
+ # encode
297
+ h = self.vae.encoder(rgb_in)
298
+
299
+ moments = self.vae.quant_conv(h)
300
+ mean, logvar = torch.chunk(moments, 2, dim=1)
301
+ # scale latent
302
+ rgb_latent = mean * self.latent_scale_factor
303
+
304
+ return rgb_latent
305
+
306
+ def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
307
+ """
308
+ Decode depth latent into depth map.
309
+ Args:
310
+ depth_latent (`torch.Tensor`):
311
+ Depth latent to be decoded.
312
+ Returns:
313
+ `torch.Tensor`: Decoded depth map.
314
+ """
315
+
316
+ # scale latent
317
+ depth_latent = depth_latent / self.latent_scale_factor
318
+ # decode
319
+ z = self.vae.post_quant_conv(depth_latent)
320
+ stacked = self.vae.decoder(z)
321
+ # mean of output channels
322
+ depth_mean = stacked.mean(dim=1, keepdim=True)
323
+ return depth_mean
324
+
325
+ def decode_normal(self, normal_latent: torch.Tensor) -> torch.Tensor:
326
+ """
327
+ Decode normal latent into normal map.
328
+ Args:
329
+ normal_latent (`torch.Tensor`):
330
+ Normal latent to be decoded.
331
+ Returns:
332
+ `torch.Tensor`: Decoded normal map.
333
+ """
334
+
335
+ # scale latent
336
+ normal_latent = normal_latent / self.latent_scale_factor
337
+ # decode
338
+ z = self.vae.post_quant_conv(normal_latent)
339
+ normal = self.vae.decoder(z)
340
+ return normal
341
+
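A quick aside on the "hybrid switcher" used in single_infer above: the depth and normal branches are denoised together as a batch of two, and the branch identity is injected as a class embedding built from fixed class vectors via concatenated sin/cos features. A minimal sketch of that embedding construction (values copied from the pipeline code; the print is illustrative only):

import torch

geo_class = torch.tensor([[0., 1.],   # row 0: depth branch
                          [1., 0.]])  # row 1: normal branch
geo_embedding = torch.cat([torch.sin(geo_class), torch.cos(geo_class)], dim=-1)
print(geo_embedding.shape)  # torch.Size([2, 4]) -- one embedding row per branch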
geowizard/models/geowizard_pipeline.py ADDED
@@ -0,0 +1,360 @@
1
+ # Adapted from Marigold :https://github.com/prs-eth/Marigold
2
+
3
+ from typing import Any, Dict, Union
4
+
5
+ import torch
6
+ from torch.utils.data import DataLoader, TensorDataset
7
+ import numpy as np
8
+ from tqdm.auto import tqdm
9
+ from PIL import Image
10
+ from diffusers import (
11
+ DiffusionPipeline,
12
+ DDIMScheduler,
13
+ AutoencoderKL,
14
+ )
15
+ from models.unet_2d_condition import UNet2DConditionModel
16
+ from diffusers.utils import BaseOutput
17
+ from transformers import CLIPTextModel, CLIPTokenizer
18
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
19
+ import torchvision.transforms.functional as TF
20
+ from torchvision.transforms import InterpolationMode
21
+
22
+ from utils.image_util import resize_max_res,chw2hwc,colorize_depth_maps
23
+ from utils.colormap import kitti_colormap
24
+ from utils.depth_ensemble import ensemble_depths
25
+ from utils.normal_ensemble import ensemble_normals
26
+ from utils.batch_size import find_batch_size
27
+ import cv2
28
+
29
+ class DepthNormalPipelineOutput(BaseOutput):
30
+ """
31
+ Output class for GeoWizard monocular depth & normal prediction pipeline.
32
+ Args:
33
+ depth_np (`np.ndarray`):
34
+ Predicted depth map, with depth values in the range of [0, 1].
35
+ depth_colored (`PIL.Image.Image`):
36
+ Colorized depth map, with the shape of [3, H, W] and values in [0, 1].
37
+ normal_np (`np.ndarray`):
38
+ Predicted normal map, with values in the range of [-1, 1].
39
+ normal_colored (`PIL.Image.Image`):
40
+ Colorized normal map, with the shape of [3, H, W] and values in [0, 1].
41
+ uncertainty (`None` or `np.ndarray`):
42
+ Uncalibrated uncertainty (MAD, median absolute deviation) from ensembling.
43
+ """
44
+ depth_np: np.ndarray
45
+ depth_colored: Image.Image
46
+ normal_np: np.ndarray
47
+ normal_colored: Image.Image
48
+ uncertainty: Union[None, np.ndarray]
49
+
50
+ class DepthNormalEstimationPipeline(DiffusionPipeline):
51
+ # latent scaling hyper-parameter (the Stable Diffusion VAE scale)
52
+ latent_scale_factor = 0.18215
53
+
54
+ def __init__(self,
55
+ unet:UNet2DConditionModel,
56
+ vae:AutoencoderKL,
57
+ scheduler:DDIMScheduler,
58
+ image_encoder:CLIPVisionModelWithProjection,
59
+ feature_extractor:CLIPImageProcessor,
60
+ ):
61
+ super().__init__()
62
+
63
+ self.register_modules(
64
+ unet=unet,
65
+ vae=vae,
66
+ scheduler=scheduler,
67
+ image_encoder=image_encoder,
68
+ feature_extractor=feature_extractor,
69
+ )
70
+ self.img_embed = None
71
+
72
+ @torch.no_grad()
73
+ def __call__(self,
74
+ input_image:Image,
75
+ denoising_steps: int = 10,
76
+ ensemble_size: int = 10,
77
+ processing_res: int = 768,
78
+ match_input_res:bool =True,
79
+ batch_size:int = 0,
80
+ domain: str = "indoor",
81
+ color_map: str="Spectral",
82
+ show_progress_bar:bool = True,
83
+ ensemble_kwargs: Dict = None,
84
+ ) -> DepthNormalPipelineOutput:
85
+
86
+ # device is inherited from the DiffusionPipeline
87
+ device = self.device
88
+ input_size = input_image.size
89
+
90
+ # adjust the input resolution.
91
+ if not match_input_res:
92
+ assert (
93
+ processing_res is not None
94
+ )," Value Error: `resize_output_back` is only valid with "
95
+
96
+ assert processing_res >= 0
97
+ assert denoising_steps >= 1
98
+ assert ensemble_size >= 1
99
+
100
+ # --------------- Image Processing ------------------------
101
+ # Resize image
102
+ if processing_res > 0:
103
+ input_image = resize_max_res(
104
+ input_image, max_edge_resolution=processing_res
105
+ )
106
+
107
+ # Convert the image to RGB to remove any alpha channel.
108
+ input_image = input_image.convert("RGB")
109
+ image = np.array(input_image)
110
+
111
+ # Normalize RGB Values.
112
+ rgb = np.transpose(image,(2,0,1))
113
+ rgb_norm = rgb / 255.0 * 2.0 - 1.0 # [0, 255] -> [-1, 1]
114
+ rgb_norm = torch.from_numpy(rgb_norm).to(self.dtype)
115
+ rgb_norm = rgb_norm.to(device)
116
+
117
+ assert rgb_norm.min() >= -1.0 and rgb_norm.max() <= 1.0
118
+
119
+ # ----------------- predicting depth -----------------
120
+ duplicated_rgb = torch.stack([rgb_norm] * ensemble_size)
121
+ single_rgb_dataset = TensorDataset(duplicated_rgb)
122
+
123
+ # find the batch size
124
+ if batch_size>0:
125
+ _bs = batch_size
126
+ else:
127
+ _bs = 1
128
+
129
+ single_rgb_loader = DataLoader(single_rgb_dataset, batch_size=_bs, shuffle=False)
130
+
131
+ # predict depth and normal maps
132
+ depth_pred_ls = []
133
+ normal_pred_ls = []
134
+
135
+ if show_progress_bar:
136
+ iterable_bar = tqdm(
137
+ single_rgb_loader, desc=" " * 2 + "Inference batches", leave=False
138
+ )
139
+ else:
140
+ iterable_bar = single_rgb_loader
141
+
142
+ for batch in iterable_bar:
143
+ (batched_image,) = batch # batched image values are in [-1, 1]
144
+
145
+ depth_pred_raw, normal_pred_raw = self.single_infer(
146
+ input_rgb=batched_image,
147
+ num_inference_steps=denoising_steps,
148
+ domain=domain,
149
+ show_pbar=show_progress_bar,
150
+ )
151
+ depth_pred_ls.append(depth_pred_raw.detach().clone())
152
+ normal_pred_ls.append(normal_pred_raw.detach().clone())
153
+
154
+ depth_preds = torch.concat(depth_pred_ls, axis=0).squeeze() #(10,224,768)
155
+ normal_preds = torch.concat(normal_pred_ls, axis=0).squeeze()
156
+ torch.cuda.empty_cache() # clear vram cache for ensembling
157
+
158
+ # ----------------- Test-time ensembling -----------------
159
+ if ensemble_size > 1:
160
+ depth_pred, pred_uncert = ensemble_depths(
161
+ depth_preds, **(ensemble_kwargs or {})
162
+ )
163
+ normal_pred = ensemble_normals(normal_preds)
164
+ else:
165
+ depth_pred = depth_preds
166
+ normal_pred = normal_preds
167
+ pred_uncert = None
168
+
169
+ # ----------------- Post processing -----------------
170
+ # Scale prediction to [0, 1]
171
+ min_d = torch.min(depth_pred)
172
+ max_d = torch.max(depth_pred)
173
+ depth_pred = (depth_pred - min_d) / (max_d - min_d)
174
+
175
+ # Convert to numpy
176
+ depth_pred = depth_pred.cpu().numpy().astype(np.float32)
177
+ normal_pred = normal_pred.cpu().numpy().astype(np.float32)
178
+
179
+ # Resize back to original resolution
180
+ if match_input_res:
181
+ pred_img = Image.fromarray(depth_pred)
182
+ pred_img = pred_img.resize(input_size)
183
+ depth_pred = np.asarray(pred_img)
184
+ normal_pred = cv2.resize(chw2hwc(normal_pred), input_size, interpolation = cv2.INTER_NEAREST)
185
+
186
+ # Clip output range: current size is the original size
187
+ depth_pred = depth_pred.clip(0, 1)
188
+ normal_pred = normal_pred.clip(-1, 1)
189
+
190
+ # Colorize
191
+ depth_colored = colorize_depth_maps(
192
+ depth_pred, 0, 1, cmap=color_map
193
+ ).squeeze() # [3, H, W], value in (0, 1)
194
+ depth_colored = (depth_colored * 255).astype(np.uint8)
195
+ depth_colored_hwc = chw2hwc(depth_colored)
196
+ depth_colored_img = Image.fromarray(depth_colored_hwc)
197
+
198
+ normal_colored = ((normal_pred + 1)/2 * 255).astype(np.uint8)
199
+ normal_colored_img = Image.fromarray(normal_colored)
200
+
201
+ return DepthNormalPipelineOutput(
202
+ depth_np = depth_pred,
203
+ depth_colored = depth_colored_img,
204
+ normal_np = normal_pred,
205
+ normal_colored = normal_colored_img,
206
+ uncertainty=pred_uncert,
207
+ )
208
+
209
+ def __encode_img_embed(self, rgb):
210
+ """
211
+ Encode the CLIP image embedding for the input image
212
+ """
213
+ clip_image_mean = torch.as_tensor(self.feature_extractor.image_mean)[:,None,None].to(device=self.device, dtype=self.dtype)
214
+ clip_image_std = torch.as_tensor(self.feature_extractor.image_std)[:,None,None].to(device=self.device, dtype=self.dtype)
215
+
216
+ img_in_proc = TF.resize((rgb +1)/2,
217
+ (self.feature_extractor.crop_size['height'], self.feature_extractor.crop_size['width']),
218
+ interpolation=InterpolationMode.BICUBIC,
219
+ antialias=True
220
+ )
221
+ # do the normalization in float32 to preserve precision
222
+ img_in_proc = ((img_in_proc.float() - clip_image_mean) / clip_image_std).to(self.dtype)
223
+ img_embed = self.image_encoder(img_in_proc).image_embeds.unsqueeze(1).to(self.dtype)
224
+
225
+ self.img_embed = img_embed
226
+
227
+
228
+ @torch.no_grad()
229
+ def single_infer(self,input_rgb:torch.Tensor,
230
+ num_inference_steps:int,
231
+ domain:str,
232
+ show_pbar:bool,):
233
+
234
+ device = input_rgb.device
235
+
236
+ # Set timesteps (inherited from the diffusion pipeline)
237
+ self.scheduler.set_timesteps(num_inference_steps, device=device) # the default number of steps is only 10
238
+ timesteps = self.scheduler.timesteps # [T]
239
+
240
+ # encode image
241
+ rgb_latent = self.encode_RGB(input_rgb)
242
+
243
+ # Initial geometric maps (Gaussian noise)
244
+ geo_latent = torch.randn(rgb_latent.shape, device=device, dtype=self.dtype).repeat(2,1,1,1)
245
+ rgb_latent = rgb_latent.repeat(2,1,1,1)
246
+
247
+ # Batched img embedding
248
+ if self.img_embed is None:
249
+ self.__encode_img_embed(input_rgb)
250
+
251
+ batch_img_embed = self.img_embed.repeat(
252
+ (rgb_latent.shape[0], 1, 1)
253
+ ) # [B, 1, 768]
254
+
255
+ # hybrid switcher
256
+ geo_class = torch.tensor([[0., 1.], [1, 0]], device=device, dtype=self.dtype)
257
+ geo_embedding = torch.cat([torch.sin(geo_class), torch.cos(geo_class)], dim=-1)
258
+
259
+ if domain == "indoor":
260
+ domain_class = torch.tensor([[1., 0., 0]], device=device, dtype=self.dtype).repeat(2,1)
261
+ elif domain == "outdoor":
262
+ domain_class = torch.tensor([[0., 1., 0]], device=device, dtype=self.dtype).repeat(2,1)
263
+ elif domain == "object":
264
+ domain_class = torch.tensor([[0., 0., 1]], device=device, dtype=self.dtype).repeat(2,1)
265
+ domain_embedding = torch.cat([torch.sin(domain_class), torch.cos(domain_class)], dim=-1)
266
+
267
+ class_embedding = torch.cat((geo_embedding, domain_embedding), dim=-1)
268
+
269
+ # Denoising loop
270
+ if show_pbar:
271
+ iterable = tqdm(
272
+ enumerate(timesteps),
273
+ total=len(timesteps),
274
+ leave=False,
275
+ desc=" " * 4 + "Diffusion denoising",
276
+ )
277
+ else:
278
+ iterable = enumerate(timesteps)
279
+
280
+ for i, t in iterable:
281
+ unet_input = torch.cat([rgb_latent, geo_latent], dim=1)
282
+
283
+ # predict the noise residual
284
+ noise_pred = self.unet(
285
+ unet_input, t.repeat(2), encoder_hidden_states=batch_img_embed, class_labels=class_embedding
286
+ ).sample # [B, 4, h, w]
287
+
288
+ # compute the previous noisy sample x_t -> x_t-1
289
+ geo_latent = self.scheduler.step(noise_pred, t, geo_latent).prev_sample
290
+
292
+ torch.cuda.empty_cache()
293
+
294
+ depth = self.decode_depth(geo_latent[0][None])
295
+ depth = torch.clip(depth, -1.0, 1.0)
296
+ depth = (depth + 1.0) / 2.0
297
+
298
+ normal = self.decode_normal(geo_latent[1][None])
299
+ normal /= (torch.norm(normal, p=2, dim=1, keepdim=True)+1e-5)
300
+ normal *= -1.
301
+
302
+ return depth, normal
303
+
304
+
305
+ def encode_RGB(self, rgb_in: torch.Tensor) -> torch.Tensor:
306
+ """
307
+ Encode RGB image into latent.
308
+ Args:
309
+ rgb_in (`torch.Tensor`):
310
+ Input RGB image to be encoded.
311
+ Returns:
312
+ `torch.Tensor`: Image latent.
313
+ """
314
+
315
+ # encode
316
+ h = self.vae.encoder(rgb_in)
317
+
318
+ moments = self.vae.quant_conv(h)
319
+ mean, logvar = torch.chunk(moments, 2, dim=1)
320
+ # scale latent
321
+ rgb_latent = mean * self.latent_scale_factor
322
+
323
+ return rgb_latent
324
+
325
+ def decode_depth(self, depth_latent: torch.Tensor) -> torch.Tensor:
326
+ """
327
+ Decode depth latent into depth map.
328
+ Args:
329
+ depth_latent (`torch.Tensor`):
330
+ Depth latent to be decoded.
331
+ Returns:
332
+ `torch.Tensor`: Decoded depth map.
333
+ """
334
+
335
+ # scale latent
336
+ depth_latent = depth_latent / self.latent_scale_factor
337
+ # decode
338
+ z = self.vae.post_quant_conv(depth_latent)
339
+ stacked = self.vae.decoder(z)
340
+ # mean of output channels
341
+ depth_mean = stacked.mean(dim=1, keepdim=True)
342
+ return depth_mean
343
+
344
+ def decode_normal(self, normal_latent: torch.Tensor) -> torch.Tensor:
345
+ """
346
+ Decode normal latent into normal map.
347
+ Args:
348
+ normal_latent (`torch.Tensor`):
349
+ Normal latent to be decoded.
350
+ Returns:
351
+ `torch.Tensor`: Decoded normal map.
352
+ """
353
+
354
+ # scale latent
355
+ normal_latent = normal_latent / self.latent_scale_factor
356
+ # decode
357
+ z = self.vae.post_quant_conv(normal_latent)
358
+ normal = self.vae.decoder(z)
359
+ return normal
360
+
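For orientation, a minimal usage sketch for the DepthNormalEstimationPipeline defined above. The checkpoint path and input file below are placeholders (not fixed by this commit), and the import assumes the geowizard directory is on sys.path, matching the in-repo imports used in this file:

import torch
from PIL import Image
from models.geowizard_pipeline import DepthNormalEstimationPipeline

# placeholder checkpoint directory; any layout DiffusionPipeline.from_pretrained accepts
pipe = DepthNormalEstimationPipeline.from_pretrained(
    "path/to/geowizard_weights", torch_dtype=torch.float16
).to("cuda")

out = pipe(
    Image.open("example.jpg"),  # placeholder input image
    denoising_steps=10,
    ensemble_size=3,
    domain="indoor",            # one of "indoor", "outdoor", "object"
)
out.depth_np       # np.ndarray, values in [0, 1]
out.normal_np      # np.ndarray, values in [-1, 1]
out.depth_colored  # PIL.Image, colorized with the "Spectral" colormap by default
out.uncertainty    # None unless ensemble_size > 1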
geowizard/models/transformer_2d.py ADDED
@@ -0,0 +1,463 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu
16
+
17
+ from dataclasses import dataclass
18
+ from typing import Any, Dict, Optional
19
+
20
+ import torch
21
+ import torch.nn.functional as F
22
+ from torch import nn
23
+
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.models.embeddings import ImagePositionalEmbeddings
26
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, is_torch_version
27
+ from models.attention import BasicTransformerBlock
28
+ from diffusers.models.embeddings import PatchEmbed, PixArtAlphaTextProjection
29
+ from diffusers.models.lora import LoRACompatibleConv, LoRACompatibleLinear
30
+ from diffusers.models.modeling_utils import ModelMixin
31
+ from diffusers.models.normalization import AdaLayerNormSingle
32
+
33
+
34
+ @dataclass
35
+ class Transformer2DModelOutput(BaseOutput):
36
+ """
37
+ The output of [`Transformer2DModel`].
38
+
39
+ Args:
40
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete):
41
+ The hidden states output conditioned on the `encoder_hidden_states` input. If discrete, returns probability
42
+ distributions for the unnoised latent pixels.
43
+ """
44
+
45
+ sample: torch.FloatTensor
46
+
47
+
48
+ class Transformer2DModel(ModelMixin, ConfigMixin):
49
+ """
50
+ A 2D Transformer model for image-like data.
51
+
52
+ Parameters:
53
+ num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention.
54
+ attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head.
55
+ in_channels (`int`, *optional*):
56
+ The number of channels in the input and output (specify if the input is **continuous**).
57
+ num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use.
58
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
59
+ cross_attention_dim (`int`, *optional*): The number of `encoder_hidden_states` dimensions to use.
60
+ sample_size (`int`, *optional*): The width of the latent images (specify if the input is **discrete**).
61
+ This is fixed during training since it is used to learn a number of position embeddings.
62
+ num_vector_embeds (`int`, *optional*):
63
+ The number of classes of the vector embeddings of the latent pixels (specify if the input is **discrete**).
64
+ Includes the class for the masked latent pixel.
65
+ activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to use in feed-forward.
66
+ num_embeds_ada_norm ( `int`, *optional*):
67
+ The number of diffusion steps used during training. Pass if at least one of the norm_layers is
68
+ `AdaLayerNorm`. This is fixed during training since it is used to learn a number of embeddings that are
69
+ added to the hidden states.
70
+
71
+ During inference, you can denoise for up to but not more steps than `num_embeds_ada_norm`.
72
+ attention_bias (`bool`, *optional*):
73
+ Configure if the `TransformerBlocks` attention should contain a bias parameter.
74
+ """
75
+
76
+ _supports_gradient_checkpointing = True
77
+
78
+ @register_to_config
79
+ def __init__(
80
+ self,
81
+ num_attention_heads: int = 16,
82
+ attention_head_dim: int = 88,
83
+ in_channels: Optional[int] = None,
84
+ out_channels: Optional[int] = None,
85
+ num_layers: int = 1,
86
+ dropout: float = 0.0,
87
+ norm_num_groups: int = 32,
88
+ cross_attention_dim: Optional[int] = None,
89
+ attention_bias: bool = False,
90
+ sample_size: Optional[int] = None,
91
+ num_vector_embeds: Optional[int] = None,
92
+ patch_size: Optional[int] = None,
93
+ activation_fn: str = "geglu",
94
+ num_embeds_ada_norm: Optional[int] = None,
95
+ use_linear_projection: bool = False,
96
+ only_cross_attention: bool = False,
97
+ double_self_attention: bool = False,
98
+ upcast_attention: bool = False,
99
+ norm_type: str = "layer_norm",
100
+ norm_elementwise_affine: bool = True,
101
+ norm_eps: float = 1e-5,
102
+ attention_type: str = "default",
103
+ caption_channels: int = None,
104
+ ):
105
+ super().__init__()
106
+ self.use_linear_projection = use_linear_projection
107
+ self.num_attention_heads = num_attention_heads
108
+ self.attention_head_dim = attention_head_dim
109
+ inner_dim = num_attention_heads * attention_head_dim
110
+
111
+ conv_cls = nn.Conv2d if USE_PEFT_BACKEND else LoRACompatibleConv
112
+ linear_cls = nn.Linear if USE_PEFT_BACKEND else LoRACompatibleLinear
113
+
114
+ # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)`
115
+ # Define whether input is continuous or discrete depending on configuration
116
+ self.is_input_continuous = (in_channels is not None) and (patch_size is None)
117
+ self.is_input_vectorized = num_vector_embeds is not None
118
+ self.is_input_patches = in_channels is not None and patch_size is not None
119
+
120
+ if norm_type == "layer_norm" and num_embeds_ada_norm is not None:
121
+ deprecation_message = (
122
+ f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or"
123
+ " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config."
124
+ " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect"
125
+ " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it"
126
+ " would be very nice if you could open a Pull request for the `transformer/config.json` file"
127
+ )
128
+ deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False)
129
+ norm_type = "ada_norm"
130
+
131
+ if self.is_input_continuous and self.is_input_vectorized:
132
+ raise ValueError(
133
+ f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make"
134
+ " sure that either `in_channels` or `num_vector_embeds` is None."
135
+ )
136
+ elif self.is_input_vectorized and self.is_input_patches:
137
+ raise ValueError(
138
+ f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make"
139
+ " sure that either `num_vector_embeds` or `num_patches` is None."
140
+ )
141
+ elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches:
142
+ raise ValueError(
143
+ f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:"
144
+ f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None."
145
+ )
146
+
147
+ # 2. Define input layers
148
+ if self.is_input_continuous:
149
+ self.in_channels = in_channels
150
+
151
+ self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True)
152
+ if use_linear_projection:
153
+ self.proj_in = linear_cls(in_channels, inner_dim)
154
+ else:
155
+ self.proj_in = conv_cls(in_channels, inner_dim, kernel_size=1, stride=1, padding=0)
156
+ elif self.is_input_vectorized:
157
+ assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size"
158
+ assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed"
159
+
160
+ self.height = sample_size
161
+ self.width = sample_size
162
+ self.num_vector_embeds = num_vector_embeds
163
+ self.num_latent_pixels = self.height * self.width
164
+
165
+ self.latent_image_embedding = ImagePositionalEmbeddings(
166
+ num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width
167
+ )
168
+ elif self.is_input_patches:
169
+ assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size"
170
+
171
+ self.height = sample_size
172
+ self.width = sample_size
173
+
174
+ self.patch_size = patch_size
175
+ interpolation_scale = self.config.sample_size // 64 # => 64 (= 512 pixart) has interpolation scale 1
176
+ interpolation_scale = max(interpolation_scale, 1)
177
+ self.pos_embed = PatchEmbed(
178
+ height=sample_size,
179
+ width=sample_size,
180
+ patch_size=patch_size,
181
+ in_channels=in_channels,
182
+ embed_dim=inner_dim,
183
+ interpolation_scale=interpolation_scale,
184
+ )
185
+
186
+ # 3. Define transformers blocks
187
+ self.transformer_blocks = nn.ModuleList(
188
+ [
189
+ BasicTransformerBlock(
190
+ inner_dim,
191
+ num_attention_heads,
192
+ attention_head_dim,
193
+ dropout=dropout,
194
+ cross_attention_dim=cross_attention_dim,
195
+ activation_fn=activation_fn,
196
+ num_embeds_ada_norm=num_embeds_ada_norm,
197
+ attention_bias=attention_bias,
198
+ only_cross_attention=only_cross_attention,
199
+ double_self_attention=double_self_attention,
200
+ upcast_attention=upcast_attention,
201
+ norm_type=norm_type,
202
+ norm_elementwise_affine=norm_elementwise_affine,
203
+ norm_eps=norm_eps,
204
+ attention_type=attention_type,
205
+ )
206
+ for d in range(num_layers)
207
+ ]
208
+ )
209
+
210
+ # 4. Define output layers
211
+ self.out_channels = in_channels if out_channels is None else out_channels
212
+ if self.is_input_continuous:
213
+ # TODO: should use out_channels for continuous projections
214
+ if use_linear_projection:
215
+ self.proj_out = linear_cls(inner_dim, in_channels)
216
+ else:
217
+ self.proj_out = conv_cls(inner_dim, in_channels, kernel_size=1, stride=1, padding=0)
218
+ elif self.is_input_vectorized:
219
+ self.norm_out = nn.LayerNorm(inner_dim)
220
+ self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1)
221
+ elif self.is_input_patches and norm_type != "ada_norm_single":
222
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
223
+ self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim)
224
+ self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
225
+ elif self.is_input_patches and norm_type == "ada_norm_single":
226
+ self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6)
227
+ self.scale_shift_table = nn.Parameter(torch.randn(2, inner_dim) / inner_dim**0.5)
228
+ self.proj_out = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels)
229
+
230
+ # 5. PixArt-Alpha blocks.
231
+ self.adaln_single = None
232
+ self.use_additional_conditions = False
233
+ if norm_type == "ada_norm_single":
234
+ self.use_additional_conditions = self.config.sample_size == 128
235
+ # TODO(Sayak, PVP) clean this, for now we use sample size to determine whether to use
236
+ # additional conditions until we find better name
237
+ self.adaln_single = AdaLayerNormSingle(inner_dim, use_additional_conditions=self.use_additional_conditions)
238
+
239
+ self.caption_projection = None
240
+ if caption_channels is not None:
241
+ self.caption_projection = PixArtAlphaTextProjection(in_features=caption_channels, hidden_size=inner_dim)
242
+
243
+ self.gradient_checkpointing = False
244
+
245
+ def _set_gradient_checkpointing(self, module, value=False):
246
+ if hasattr(module, "gradient_checkpointing"):
247
+ module.gradient_checkpointing = value
248
+
249
+ def forward(
250
+ self,
251
+ hidden_states: torch.Tensor,
252
+ encoder_hidden_states: Optional[torch.Tensor] = None,
253
+ timestep: Optional[torch.LongTensor] = None,
254
+ added_cond_kwargs: Dict[str, torch.Tensor] = None,
255
+ class_labels: Optional[torch.LongTensor] = None,
256
+ cross_attention_kwargs: Dict[str, Any] = None,
257
+ attention_mask: Optional[torch.Tensor] = None,
258
+ encoder_attention_mask: Optional[torch.Tensor] = None,
259
+ return_dict: bool = True,
260
+ ):
261
+ """
262
+ The [`Transformer2DModel`] forward method.
263
+
264
+ Args:
265
+ hidden_states (`torch.LongTensor` of shape `(batch size, num latent pixels)` if discrete, `torch.FloatTensor` of shape `(batch size, channel, height, width)` if continuous):
266
+ Input `hidden_states`.
267
+ encoder_hidden_states ( `torch.FloatTensor` of shape `(batch size, sequence len, embed dims)`, *optional*):
268
+ Conditional embeddings for cross attention layer. If not given, cross-attention defaults to
269
+ self-attention.
270
+ timestep ( `torch.LongTensor`, *optional*):
271
+ Used to indicate denoising step. Optional timestep to be applied as an embedding in `AdaLayerNorm`.
272
+ class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*):
273
+ Used to indicate class labels conditioning. Optional class labels to be applied as an embedding in
274
+ `AdaLayerZeroNorm`.
275
+ cross_attention_kwargs ( `Dict[str, Any]`, *optional*):
276
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
277
+ `self.processor` in
278
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
279
+ attention_mask ( `torch.Tensor`, *optional*):
280
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
281
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
282
+ negative values to the attention scores corresponding to "discard" tokens.
283
+ encoder_attention_mask ( `torch.Tensor`, *optional*):
284
+ Cross-attention mask applied to `encoder_hidden_states`. Two formats supported:
285
+
286
+ * Mask `(batch, sequence_length)` True = keep, False = discard.
287
+ * Bias `(batch, 1, sequence_length)` 0 = keep, -10000 = discard.
288
+
289
+ If `ndim == 2`: will be interpreted as a mask, then converted into a bias consistent with the format
290
+ above. This bias will be added to the cross-attention scores.
291
+ return_dict (`bool`, *optional*, defaults to `True`):
292
+ Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
293
+ tuple.
294
+
295
+ Returns:
296
+ If `return_dict` is True, an [`~models.transformer_2d.Transformer2DModelOutput`] is returned, otherwise a
297
+ `tuple` where the first element is the sample tensor.
298
+ """
299
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension.
300
+ # we may have done this conversion already, e.g. if we came here via UNet2DConditionModel#forward.
301
+ # we can tell by counting dims; if ndim == 2: it's a mask rather than a bias.
302
+ # expects mask of shape:
303
+ # [batch, key_tokens]
304
+ # adds singleton query_tokens dimension:
305
+ # [batch, 1, key_tokens]
306
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
307
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
308
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
309
+
310
+ if attention_mask is not None and attention_mask.ndim == 2:
311
+ # assume that mask is expressed as:
312
+ # (1 = keep, 0 = discard)
313
+ # convert mask into a bias that can be added to attention scores:
314
+ # (keep = +0, discard = -10000.0)
315
+ attention_mask = (1 - attention_mask.to(hidden_states.dtype)) * -10000.0
316
+ attention_mask = attention_mask.unsqueeze(1)
317
+
318
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
319
+ if encoder_attention_mask is not None and encoder_attention_mask.ndim == 2:
320
+ encoder_attention_mask = (1 - encoder_attention_mask.to(hidden_states.dtype)) * -10000.0
321
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
322
+
323
+ # Retrieve lora scale.
324
+ lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
325
+
326
+ # 1. Input
327
+ if self.is_input_continuous:
328
+ batch, _, height, width = hidden_states.shape
329
+ residual = hidden_states
330
+
331
+ hidden_states = self.norm(hidden_states)
332
+ if not self.use_linear_projection:
333
+ hidden_states = (
334
+ self.proj_in(hidden_states, scale=lora_scale)
335
+ if not USE_PEFT_BACKEND
336
+ else self.proj_in(hidden_states)
337
+ )
338
+ inner_dim = hidden_states.shape[1]
339
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
340
+ else:
341
+ inner_dim = hidden_states.shape[1]
342
+ hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
343
+ hidden_states = (
344
+ self.proj_in(hidden_states, scale=lora_scale)
345
+ if not USE_PEFT_BACKEND
346
+ else self.proj_in(hidden_states)
347
+ )
348
+
349
+ elif self.is_input_vectorized:
350
+ hidden_states = self.latent_image_embedding(hidden_states)
351
+ elif self.is_input_patches:
352
+ height, width = hidden_states.shape[-2] // self.patch_size, hidden_states.shape[-1] // self.patch_size
353
+ hidden_states = self.pos_embed(hidden_states)
354
+
355
+ if self.adaln_single is not None:
356
+ if self.use_additional_conditions and added_cond_kwargs is None:
357
+ raise ValueError(
358
+ "`added_cond_kwargs` cannot be None when using additional conditions for `adaln_single`."
359
+ )
360
+ batch_size = hidden_states.shape[0]
361
+ timestep, embedded_timestep = self.adaln_single(
362
+ timestep, added_cond_kwargs, batch_size=batch_size, hidden_dtype=hidden_states.dtype
363
+ )
364
+
365
+ # 2. Blocks
366
+ if self.caption_projection is not None:
367
+ batch_size = hidden_states.shape[0]
368
+ encoder_hidden_states = self.caption_projection(encoder_hidden_states)
369
+ encoder_hidden_states = encoder_hidden_states.view(batch_size, -1, hidden_states.shape[-1])
370
+
371
+ for block in self.transformer_blocks:
372
+ if self.training and self.gradient_checkpointing:
373
+
374
+ def create_custom_forward(module, return_dict=None):
375
+ def custom_forward(*inputs):
376
+ if return_dict is not None:
377
+ return module(*inputs, return_dict=return_dict)
378
+ else:
379
+ return module(*inputs)
380
+
381
+ return custom_forward
382
+
383
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
384
+ hidden_states = torch.utils.checkpoint.checkpoint(
385
+ create_custom_forward(block),
386
+ hidden_states,
387
+ attention_mask,
388
+ encoder_hidden_states,
389
+ encoder_attention_mask,
390
+ timestep,
391
+ cross_attention_kwargs,
392
+ class_labels,
393
+ **ckpt_kwargs,
394
+ )
395
+ else:
396
+ hidden_states = block(
397
+ hidden_states,
398
+ attention_mask=attention_mask,
399
+ encoder_hidden_states=encoder_hidden_states,
400
+ encoder_attention_mask=encoder_attention_mask,
401
+ timestep=timestep,
402
+ cross_attention_kwargs=cross_attention_kwargs,
403
+ class_labels=class_labels,
404
+ )
405
+
406
+ # 3. Output
407
+ if self.is_input_continuous:
408
+ if not self.use_linear_projection:
409
+ hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
410
+ hidden_states = (
411
+ self.proj_out(hidden_states, scale=lora_scale)
412
+ if not USE_PEFT_BACKEND
413
+ else self.proj_out(hidden_states)
414
+ )
415
+ else:
416
+ hidden_states = (
417
+ self.proj_out(hidden_states, scale=lora_scale)
418
+ if not USE_PEFT_BACKEND
419
+ else self.proj_out(hidden_states)
420
+ )
421
+ hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
422
+
423
+ output = hidden_states + residual
424
+ elif self.is_input_vectorized:
425
+ hidden_states = self.norm_out(hidden_states)
426
+ logits = self.out(hidden_states)
427
+ # (batch, self.num_vector_embeds - 1, self.num_latent_pixels)
428
+ logits = logits.permute(0, 2, 1)
429
+
430
+ # log(p(x_0))
431
+ output = F.log_softmax(logits.double(), dim=1).float()
432
+
433
+ if self.is_input_patches:
434
+ if self.config.norm_type != "ada_norm_single":
435
+ conditioning = self.transformer_blocks[0].norm1.emb(
436
+ timestep, class_labels, hidden_dtype=hidden_states.dtype
437
+ )
438
+ shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1)
439
+ hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None]
440
+ hidden_states = self.proj_out_2(hidden_states)
441
+ elif self.config.norm_type == "ada_norm_single":
442
+ shift, scale = (self.scale_shift_table[None] + embedded_timestep[:, None]).chunk(2, dim=1)
443
+ hidden_states = self.norm_out(hidden_states)
444
+ # Modulation
445
+ hidden_states = hidden_states * (1 + scale) + shift
446
+ hidden_states = self.proj_out(hidden_states)
447
+ hidden_states = hidden_states.squeeze(1)
448
+
449
+ # unpatchify
450
+ if self.adaln_single is None:
451
+ height = width = int(hidden_states.shape[1] ** 0.5)
452
+ hidden_states = hidden_states.reshape(
453
+ shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels)
454
+ )
455
+ hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states)
456
+ output = hidden_states.reshape(
457
+ shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size)
458
+ )
459
+
460
+ if not return_dict:
461
+ return (output,)
462
+
463
+ return Transformer2DModelOutput(sample=output)
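A note on the mask handling in Transformer2DModel.forward above: a 2D keep/discard mask is converted into an additive bias before it reaches the attention layers. A standalone sketch of that conversion, using the same arithmetic as the forward pass:

import torch

attention_mask = torch.tensor([[1, 1, 0]])      # 1 = keep, 0 = discard
bias = (1 - attention_mask.float()) * -10000.0  # keep -> 0.0, discard -> -10000.0
bias = bias.unsqueeze(1)                        # [batch, 1, key_tokens]
# `bias` now broadcasts over attention scores shaped
# [batch, heads, query_tokens, key_tokens] (or [batch * heads, q, k]).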
geowizard/models/unet_2d_blocks.py ADDED
The diff for this file is too large to render. See raw diff
 
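One more illustration from transformer_2d.py above before moving on: the patched-input branch "unpatchifies" its output with an einsum. The same reshape, spelled out on a tiny tensor (the shapes are arbitrary illustrative values):

import torch

n, h, w, p, c = 1, 2, 2, 4, 8              # a 2x2 grid of 4x4 patches, 8 channels
tokens = torch.randn(n, h * w, p * p * c)  # transformer output: one token per patch
x = tokens.reshape(-1, h, w, p, p, c)
x = torch.einsum("nhwpqc->nchpwq", x)
img = x.reshape(-1, c, h * p, w * p)       # [1, 8, 8, 8] image-shaped output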
geowizard/models/unet_2d_condition.py ADDED
@@ -0,0 +1,1213 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # Some modifications are reimplemented in public environments by Xiao Fu and Mu Hu
16
+
17
+ from dataclasses import dataclass
18
+ from typing import Any, Dict, List, Optional, Tuple, Union
19
+
20
+ import torch
21
+ import torch.nn as nn
22
+ import torch.utils.checkpoint
23
+
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.loaders import UNet2DConditionLoadersMixin
26
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
27
+ from diffusers.models.activations import get_activation
28
+ from diffusers.models.attention_processor import (
29
+ ADDED_KV_ATTENTION_PROCESSORS,
30
+ CROSS_ATTENTION_PROCESSORS,
31
+ Attention,
32
+ AttentionProcessor,
33
+ AttnAddedKVProcessor,
34
+ AttnProcessor,
35
+ )
36
+ from diffusers.models.embeddings import (
37
+ GaussianFourierProjection,
38
+ ImageHintTimeEmbedding,
39
+ ImageProjection,
40
+ ImageTimeEmbedding,
41
+ TextImageProjection,
42
+ TextImageTimeEmbedding,
43
+ TextTimeEmbedding,
44
+ TimestepEmbedding,
45
+ Timesteps,
46
+ )
47
+ from diffusers.models.modeling_utils import ModelMixin
48
+
49
+ from models.unet_2d_blocks import (
50
+ UNetMidBlock2D,
51
+ UNetMidBlock2DCrossAttn,
52
+ UNetMidBlock2DSimpleCrossAttn,
53
+ get_down_block,
54
+ get_up_block,
55
+ )
56
+
57
+
58
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
59
+
60
+
61
+ @dataclass
62
+ class UNet2DConditionOutput(BaseOutput):
63
+ """
64
+ The output of [`UNet2DConditionModel`].
65
+
66
+ Args:
67
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
68
+ The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
69
+ """
70
+
71
+ sample: torch.FloatTensor = None
72
+
73
+
74
+ class UNet2DConditionModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin):
75
+ r"""
76
+ A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
77
+ shaped output.
78
+
79
+ This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
80
+ for all models (such as downloading or saving).
81
+
82
+ Parameters:
83
+ sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
84
+ Height and width of input/output sample.
85
+ in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
86
+ out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
87
+ center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
88
+ flip_sin_to_cos (`bool`, *optional*, defaults to `False`):
89
+ Whether to flip the sin to cos in the time embedding.
90
+ freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
91
+ down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
92
+ The tuple of downsample blocks to use.
93
+ mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
94
+ Block type for middle of UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
95
+ `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
96
+ up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
97
+ The tuple of upsample blocks to use.
98
+ only_cross_attention(`bool` or `Tuple[bool]`, *optional*, default to `False`):
99
+ Whether to include self-attention in the basic transformer blocks, see
100
+ [`~models.attention.BasicTransformerBlock`].
101
+ block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
102
+ The tuple of output channels for each block.
103
+ layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
104
+ downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
105
+ mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
106
+ dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
107
+ act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
108
+ norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
109
+ If `None`, normalization and activation layers is skipped in post-processing.
110
+ norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
111
+ cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
112
+ The dimension of the cross attention features.
113
+ transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]` , *optional*, defaults to 1):
114
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
115
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
116
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
117
+ reverse_transformer_layers_per_block : (`Tuple[Tuple]`, *optional*, defaults to None):
118
+ The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
119
+ blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
120
+ [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
121
+ [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
122
+ encoder_hid_dim (`int`, *optional*, defaults to None):
123
+ If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
124
+ dimension to `cross_attention_dim`.
125
+ encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
126
+ If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
127
+ embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`.
128
+ attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
129
+ num_attention_heads (`int`, *optional*):
130
+ The number of attention heads. If not defined, defaults to `attention_head_dim`
131
+ resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
132
+ for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
133
+ class_embed_type (`str`, *optional*, defaults to `None`):
134
+ The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
135
+ `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
136
+ addition_embed_type (`str`, *optional*, defaults to `None`):
137
+ Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
138
+ "text". "text" will use the `TextTimeEmbedding` layer.
139
+ addition_time_embed_dim: (`int`, *optional*, defaults to `None`):
140
+ Dimension for the timestep embeddings.
141
+ num_class_embeds (`int`, *optional*, defaults to `None`):
142
+ Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
143
+ class conditioning with `class_embed_type` equal to `None`.
144
+ time_embedding_type (`str`, *optional*, defaults to `positional`):
145
+ The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
146
+ time_embedding_dim (`int`, *optional*, defaults to `None`):
147
+ An optional override for the dimension of the projected time embedding.
148
+ time_embedding_act_fn (`str`, *optional*, defaults to `None`):
149
+ Optional activation function to use only once on the time embeddings before they are passed to the rest of
150
+ the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
151
+ timestep_post_act (`str`, *optional*, defaults to `None`):
152
+ The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
153
+ time_cond_proj_dim (`int`, *optional*, defaults to `None`):
154
+ The dimension of `cond_proj` layer in the timestep embedding.
155
+ conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_in` layer.
156
+ conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of the `conv_out` layer.
157
+ projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
158
+ `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
159
+ class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
160
+ embeddings with the class embeddings.
161
+ mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
162
+ Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
163
+ `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
164
+ `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Default to `False`
165
+ otherwise.
166
+ """
167
+
168
+ _supports_gradient_checkpointing = True
169
+
170
+ @register_to_config
171
+ def __init__(
172
+ self,
173
+ sample_size: Optional[int] = None,
174
+ in_channels: int = 4,
175
+ out_channels: int = 4,
176
+ center_input_sample: bool = False,
177
+ flip_sin_to_cos: bool = True,
178
+ freq_shift: int = 0,
179
+ down_block_types: Tuple[str] = (
180
+ "CrossAttnDownBlock2D",
181
+ "CrossAttnDownBlock2D",
182
+ "CrossAttnDownBlock2D",
183
+ "DownBlock2D",
184
+ ),
185
+ mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
186
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
187
+ only_cross_attention: Union[bool, Tuple[bool]] = False,
188
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
189
+ layers_per_block: Union[int, Tuple[int]] = 2,
190
+ downsample_padding: int = 1,
191
+ mid_block_scale_factor: float = 1,
192
+ dropout: float = 0.0,
193
+ act_fn: str = "silu",
194
+ norm_num_groups: Optional[int] = 32,
195
+ norm_eps: float = 1e-5,
196
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
197
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
198
+ reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
199
+ encoder_hid_dim: Optional[int] = None,
200
+ encoder_hid_dim_type: Optional[str] = None,
201
+ attention_head_dim: Union[int, Tuple[int]] = 8,
202
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
203
+ dual_cross_attention: bool = False,
204
+ use_linear_projection: bool = False,
205
+ class_embed_type: Optional[str] = None,
206
+ addition_embed_type: Optional[str] = None,
207
+ addition_time_embed_dim: Optional[int] = None,
208
+ num_class_embeds: Optional[int] = None,
209
+ upcast_attention: bool = False,
210
+ resnet_time_scale_shift: str = "default",
211
+ resnet_skip_time_act: bool = False,
212
+ resnet_out_scale_factor: int = 1.0,
213
+ time_embedding_type: str = "positional",
214
+ time_embedding_dim: Optional[int] = None,
215
+ time_embedding_act_fn: Optional[str] = None,
216
+ timestep_post_act: Optional[str] = None,
217
+ time_cond_proj_dim: Optional[int] = None,
218
+ conv_in_kernel: int = 3,
219
+ conv_out_kernel: int = 3,
220
+ projection_class_embeddings_input_dim: Optional[int] = None,
221
+ attention_type: str = "default",
222
+ class_embeddings_concat: bool = False,
223
+ mid_block_only_cross_attention: Optional[bool] = None,
224
+ cross_attention_norm: Optional[str] = None,
225
+ addition_embed_type_num_heads=64,
226
+ ):
227
+ super().__init__()
228
+
229
+ self.sample_size = sample_size
230
+
231
+ if num_attention_heads is not None:
232
+ raise ValueError(
233
+ "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
234
+ )
235
+
236
+ # If `num_attention_heads` is not defined (which is the case for most models)
237
+ # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
238
+ # The reason for this behavior is to correct for incorrectly named variables that were introduced
239
+ # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
240
+ # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
241
+ # which is why we correct for the naming here.
242
+ num_attention_heads = num_attention_heads or attention_head_dim
243
+
244
+ # Check inputs
245
+ if len(down_block_types) != len(up_block_types):
246
+ raise ValueError(
247
+ f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
248
+ )
249
+
250
+ if len(block_out_channels) != len(down_block_types):
251
+ raise ValueError(
252
+ f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
253
+ )
254
+
255
+ if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
256
+ raise ValueError(
257
+ f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
258
+ )
259
+
260
+ if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
261
+ raise ValueError(
262
+ f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
263
+ )
264
+
265
+ if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
266
+ raise ValueError(
267
+ f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
268
+ )
269
+
270
+ if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
271
+ raise ValueError(
272
+ f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
273
+ )
274
+
275
+ if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
276
+ raise ValueError(
277
+ f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
278
+ )
279
+ if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
280
+ for layer_number_per_block in transformer_layers_per_block:
281
+ if isinstance(layer_number_per_block, list):
282
+ raise ValueError("Must provide 'reverse_transformer_layers_per_block` if using asymmetrical UNet.")
283
+
284
+ # input
285
+ conv_in_padding = (conv_in_kernel - 1) // 2
286
+ self.conv_in = nn.Conv2d(
287
+ in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
288
+ )
289
+
290
+ # time
291
+ if time_embedding_type == "fourier":
292
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
293
+ if time_embed_dim % 2 != 0:
294
+ raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
295
+ self.time_proj = GaussianFourierProjection(
296
+ time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
297
+ )
298
+ timestep_input_dim = time_embed_dim
299
+ elif time_embedding_type == "positional":
300
+ time_embed_dim = time_embedding_dim or block_out_channels[0] * 4
301
+
302
+ self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
303
+ timestep_input_dim = block_out_channels[0]
304
+ else:
305
+ raise ValueError(
306
+ f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
307
+ )
308
+
309
+ self.time_embedding = TimestepEmbedding(
310
+ timestep_input_dim,
311
+ time_embed_dim,
312
+ act_fn=act_fn,
313
+ post_act_fn=timestep_post_act,
314
+ cond_proj_dim=time_cond_proj_dim,
315
+ )
316
+
317
+ if encoder_hid_dim_type is None and encoder_hid_dim is not None:
318
+ encoder_hid_dim_type = "text_proj"
319
+ self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
320
+ logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")
321
+
322
+ if encoder_hid_dim is None and encoder_hid_dim_type is not None:
323
+ raise ValueError(
324
+ f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
325
+ )
326
+
327
+ if encoder_hid_dim_type == "text_proj":
328
+ self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
329
+ elif encoder_hid_dim_type == "text_image_proj":
330
+ # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
331
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
332
+ # case when `encoder_hid_dim_type == "text_image_proj"` (Kandinsky 2.1)
333
+ self.encoder_hid_proj = TextImageProjection(
334
+ text_embed_dim=encoder_hid_dim,
335
+ image_embed_dim=cross_attention_dim,
336
+ cross_attention_dim=cross_attention_dim,
337
+ )
338
+ elif encoder_hid_dim_type == "image_proj":
339
+ # Kandinsky 2.2
340
+ self.encoder_hid_proj = ImageProjection(
341
+ image_embed_dim=encoder_hid_dim,
342
+ cross_attention_dim=cross_attention_dim,
343
+ )
344
+ elif encoder_hid_dim_type is not None:
345
+ raise ValueError(
346
+ f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'."
347
+ )
348
+ else:
349
+ self.encoder_hid_proj = None
350
+
351
+ # class embedding
352
+ if class_embed_type is None and num_class_embeds is not None:
353
+ self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
354
+ elif class_embed_type == "timestep":
355
+ self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
356
+ elif class_embed_type == "identity":
357
+ self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
358
+ elif class_embed_type == "projection":
359
+ if projection_class_embeddings_input_dim is None:
360
+ raise ValueError(
361
+ "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
362
+ )
363
+ # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
364
+ # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
365
+ # 2. it projects from an arbitrary input dimension.
366
+ #
367
+ # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
368
+ # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
369
+ # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
370
+ self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
371
+ elif class_embed_type == "simple_projection":
372
+ if projection_class_embeddings_input_dim is None:
373
+ raise ValueError(
374
+ "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
375
+ )
376
+ self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
377
+ else:
378
+ self.class_embedding = None
379
+
380
+ if addition_embed_type == "text":
381
+ if encoder_hid_dim is not None:
382
+ text_time_embedding_from_dim = encoder_hid_dim
383
+ else:
384
+ text_time_embedding_from_dim = cross_attention_dim
385
+
386
+ self.add_embedding = TextTimeEmbedding(
387
+ text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
388
+ )
389
+ elif addition_embed_type == "text_image":
390
+ # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
391
+ # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
392
+ # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)
393
+ self.add_embedding = TextImageTimeEmbedding(
394
+ text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
395
+ )
396
+ elif addition_embed_type == "text_time":
397
+ self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
398
+ self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
399
+ elif addition_embed_type == "image":
400
+ # Kandinsky 2.2
401
+ self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
402
+ elif addition_embed_type == "image_hint":
403
+ # Kandinsky 2.2 ControlNet
404
+ self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
405
+ elif addition_embed_type is not None:
406
+ raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.")
407
+
408
+ if time_embedding_act_fn is None:
409
+ self.time_embed_act = None
410
+ else:
411
+ self.time_embed_act = get_activation(time_embedding_act_fn)
412
+
413
+ self.down_blocks = nn.ModuleList([])
414
+ self.up_blocks = nn.ModuleList([])
415
+
416
+ if isinstance(only_cross_attention, bool):
417
+ if mid_block_only_cross_attention is None:
418
+ mid_block_only_cross_attention = only_cross_attention
419
+
420
+ only_cross_attention = [only_cross_attention] * len(down_block_types)
421
+
422
+ if mid_block_only_cross_attention is None:
423
+ mid_block_only_cross_attention = False
424
+
425
+ if isinstance(num_attention_heads, int):
426
+ num_attention_heads = (num_attention_heads,) * len(down_block_types)
427
+
428
+ if isinstance(attention_head_dim, int):
429
+ attention_head_dim = (attention_head_dim,) * len(down_block_types)
430
+
431
+ if isinstance(cross_attention_dim, int):
432
+ cross_attention_dim = (cross_attention_dim,) * len(down_block_types)
433
+
434
+ if isinstance(layers_per_block, int):
435
+ layers_per_block = [layers_per_block] * len(down_block_types)
436
+
437
+ if isinstance(transformer_layers_per_block, int):
438
+ transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)
439
+
440
+ if class_embeddings_concat:
441
+ # The time embeddings are concatenated with the class embeddings. The dimension of the
442
+ # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
443
+ # regular time embeddings
444
+ blocks_time_embed_dim = time_embed_dim * 2
445
+ else:
446
+ blocks_time_embed_dim = time_embed_dim
447
+
448
+ # down
449
+ output_channel = block_out_channels[0]
450
+ for i, down_block_type in enumerate(down_block_types):
451
+ input_channel = output_channel
452
+ output_channel = block_out_channels[i]
453
+ is_final_block = i == len(block_out_channels) - 1
454
+
455
+ down_block = get_down_block(
456
+ down_block_type,
457
+ num_layers=layers_per_block[i],
458
+ transformer_layers_per_block=transformer_layers_per_block[i],
459
+ in_channels=input_channel,
460
+ out_channels=output_channel,
461
+ temb_channels=blocks_time_embed_dim,
462
+ add_downsample=not is_final_block,
463
+ resnet_eps=norm_eps,
464
+ resnet_act_fn=act_fn,
465
+ resnet_groups=norm_num_groups,
466
+ cross_attention_dim=cross_attention_dim[i],
467
+ num_attention_heads=num_attention_heads[i],
468
+ downsample_padding=downsample_padding,
469
+ dual_cross_attention=dual_cross_attention,
470
+ use_linear_projection=use_linear_projection,
471
+ only_cross_attention=only_cross_attention[i],
472
+ upcast_attention=upcast_attention,
473
+ resnet_time_scale_shift=resnet_time_scale_shift,
474
+ attention_type=attention_type,
475
+ resnet_skip_time_act=resnet_skip_time_act,
476
+ resnet_out_scale_factor=resnet_out_scale_factor,
477
+ cross_attention_norm=cross_attention_norm,
478
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
479
+ dropout=dropout,
480
+ )
481
+ self.down_blocks.append(down_block)
482
+
483
+ # mid
484
+ if mid_block_type == "UNetMidBlock2DCrossAttn":
485
+ self.mid_block = UNetMidBlock2DCrossAttn(
486
+ transformer_layers_per_block=transformer_layers_per_block[-1],
487
+ in_channels=block_out_channels[-1],
488
+ temb_channels=blocks_time_embed_dim,
489
+ dropout=dropout,
490
+ resnet_eps=norm_eps,
491
+ resnet_act_fn=act_fn,
492
+ output_scale_factor=mid_block_scale_factor,
493
+ resnet_time_scale_shift=resnet_time_scale_shift,
494
+ cross_attention_dim=cross_attention_dim[-1],
495
+ num_attention_heads=num_attention_heads[-1],
496
+ resnet_groups=norm_num_groups,
497
+ dual_cross_attention=dual_cross_attention,
498
+ use_linear_projection=use_linear_projection,
499
+ upcast_attention=upcast_attention,
500
+ attention_type=attention_type,
501
+ )
502
+ elif mid_block_type == "UNetMidBlock2DSimpleCrossAttn":
503
+ self.mid_block = UNetMidBlock2DSimpleCrossAttn(
504
+ in_channels=block_out_channels[-1],
505
+ temb_channels=blocks_time_embed_dim,
506
+ dropout=dropout,
507
+ resnet_eps=norm_eps,
508
+ resnet_act_fn=act_fn,
509
+ output_scale_factor=mid_block_scale_factor,
510
+ cross_attention_dim=cross_attention_dim[-1],
511
+ attention_head_dim=attention_head_dim[-1],
512
+ resnet_groups=norm_num_groups,
513
+ resnet_time_scale_shift=resnet_time_scale_shift,
514
+ skip_time_act=resnet_skip_time_act,
515
+ only_cross_attention=mid_block_only_cross_attention,
516
+ cross_attention_norm=cross_attention_norm,
517
+ )
518
+ elif mid_block_type == "UNetMidBlock2D":
519
+ self.mid_block = UNetMidBlock2D(
520
+ in_channels=block_out_channels[-1],
521
+ temb_channels=blocks_time_embed_dim,
522
+ dropout=dropout,
523
+ num_layers=0,
524
+ resnet_eps=norm_eps,
525
+ resnet_act_fn=act_fn,
526
+ output_scale_factor=mid_block_scale_factor,
527
+ resnet_groups=norm_num_groups,
528
+ resnet_time_scale_shift=resnet_time_scale_shift,
529
+ add_attention=False,
530
+ )
531
+ elif mid_block_type is None:
532
+ self.mid_block = None
533
+ else:
534
+ raise ValueError(f"unknown mid_block_type : {mid_block_type}")
535
+
536
+ # count how many layers upsample the images
537
+ self.num_upsamplers = 0
538
+
539
+ # up
540
+ reversed_block_out_channels = list(reversed(block_out_channels))
541
+ reversed_num_attention_heads = list(reversed(num_attention_heads))
542
+ reversed_layers_per_block = list(reversed(layers_per_block))
543
+ reversed_cross_attention_dim = list(reversed(cross_attention_dim))
544
+ reversed_transformer_layers_per_block = (
545
+ list(reversed(transformer_layers_per_block))
546
+ if reverse_transformer_layers_per_block is None
547
+ else reverse_transformer_layers_per_block
548
+ )
549
+ only_cross_attention = list(reversed(only_cross_attention))
550
+
551
+ output_channel = reversed_block_out_channels[0]
552
+ for i, up_block_type in enumerate(up_block_types):
553
+ is_final_block = i == len(block_out_channels) - 1
554
+
555
+ prev_output_channel = output_channel
556
+ output_channel = reversed_block_out_channels[i]
557
+ input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]
558
+
559
+ # add upsample block for all BUT final layer
560
+ if not is_final_block:
561
+ add_upsample = True
562
+ self.num_upsamplers += 1
563
+ else:
564
+ add_upsample = False
565
+
566
+ up_block = get_up_block(
567
+ up_block_type,
568
+ num_layers=reversed_layers_per_block[i] + 1,
569
+ transformer_layers_per_block=reversed_transformer_layers_per_block[i],
570
+ in_channels=input_channel,
571
+ out_channels=output_channel,
572
+ prev_output_channel=prev_output_channel,
573
+ temb_channels=blocks_time_embed_dim,
574
+ add_upsample=add_upsample,
575
+ resnet_eps=norm_eps,
576
+ resnet_act_fn=act_fn,
577
+ resolution_idx=i,
578
+ resnet_groups=norm_num_groups,
579
+ cross_attention_dim=reversed_cross_attention_dim[i],
580
+ num_attention_heads=reversed_num_attention_heads[i],
581
+ dual_cross_attention=dual_cross_attention,
582
+ use_linear_projection=use_linear_projection,
583
+ only_cross_attention=only_cross_attention[i],
584
+ upcast_attention=upcast_attention,
585
+ resnet_time_scale_shift=resnet_time_scale_shift,
586
+ attention_type=attention_type,
587
+ resnet_skip_time_act=resnet_skip_time_act,
588
+ resnet_out_scale_factor=resnet_out_scale_factor,
589
+ cross_attention_norm=cross_attention_norm,
590
+ attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
591
+ dropout=dropout,
592
+ )
593
+ self.up_blocks.append(up_block)
594
+ prev_output_channel = output_channel
595
+
596
+ # out
597
+ if norm_num_groups is not None:
598
+ self.conv_norm_out = nn.GroupNorm(
599
+ num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
600
+ )
601
+
602
+ self.conv_act = get_activation(act_fn)
603
+
604
+ else:
605
+ self.conv_norm_out = None
606
+ self.conv_act = None
607
+
608
+ conv_out_padding = (conv_out_kernel - 1) // 2
609
+ self.conv_out = nn.Conv2d(
610
+ block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
611
+ )
612
+
613
+ if attention_type in ["gated", "gated-text-image"]:
614
+ positive_len = 768
615
+ if isinstance(cross_attention_dim, int):
616
+ positive_len = cross_attention_dim
617
+ elif isinstance(cross_attention_dim, tuple) or isinstance(cross_attention_dim, list):
618
+ positive_len = cross_attention_dim[0]
619
+
620
+ feature_type = "text-only" if attention_type == "gated" else "text-image"
621
+ self.position_net = PositionNet(
622
+ positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
623
+ )
624
+
625
+ @property
626
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
627
+ r"""
628
+ Returns:
629
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
630
+ indexed by their weight names.
631
+ """
632
+ # set recursively
633
+ processors = {}
634
+
635
+ def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
636
+ if hasattr(module, "get_processor"):
637
+ processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)
638
+
639
+ for sub_name, child in module.named_children():
640
+ fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)
641
+
642
+ return processors
643
+
644
+ for name, module in self.named_children():
645
+ fn_recursive_add_processors(name, module, processors)
646
+
647
+ return processors
648
+
649
+ def set_attn_processor(
650
+ self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]], _remove_lora=False
651
+ ):
652
+ r"""
653
+ Sets the attention processor to use to compute attention.
654
+
655
+ Parameters:
656
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
657
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
658
+ for **all** `Attention` layers.
659
+
660
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
661
+ processor. This is strongly recommended when setting trainable attention processors.
662
+
663
+ """
664
+ count = len(self.attn_processors.keys())
665
+
666
+ if isinstance(processor, dict) and len(processor) != count:
667
+ raise ValueError(
668
+ f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
669
+ f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
670
+ )
671
+
672
+ def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
673
+ if hasattr(module, "set_processor"):
674
+ if not isinstance(processor, dict):
675
+ module.set_processor(processor, _remove_lora=_remove_lora)
676
+ else:
677
+ module.set_processor(processor.pop(f"{name}.processor"), _remove_lora=_remove_lora)
678
+
679
+ for sub_name, child in module.named_children():
680
+ fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)
681
+
682
+ for name, module in self.named_children():
683
+ fn_recursive_attn_processor(name, module, processor)
684
+
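# A minimal usage sketch (assuming `unet` is an instance of this class): one
# processor can be set for every layer at once, or a dict keyed by weight name
# can target individual layers.
#     from diffusers.models.attention_processor import AttnProcessor
#     unet.set_attn_processor(AttnProcessor())             # same processor for all layers
#     unet.set_attn_processor(dict(unet.attn_processors))  # per-layer mapping by weight name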
685
+ def set_default_attn_processor(self):
686
+ """
687
+ Disables custom attention processors and sets the default attention implementation.
688
+ """
689
+ if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
690
+ processor = AttnAddedKVProcessor()
691
+ elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
692
+ processor = AttnProcessor()
693
+ else:
694
+ raise ValueError(
695
+ f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
696
+ )
697
+
698
+ self.set_attn_processor(processor, _remove_lora=True)
699
+
700
+ def set_attention_slice(self, slice_size):
701
+ r"""
702
+ Enable sliced attention computation.
703
+
704
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
705
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
706
+
707
+ Args:
708
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
709
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
710
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
711
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
712
+ must be a multiple of `slice_size`.
713
+ """
714
+ sliceable_head_dims = []
715
+
716
+ def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
717
+ if hasattr(module, "set_attention_slice"):
718
+ sliceable_head_dims.append(module.sliceable_head_dim)
719
+
720
+ for child in module.children():
721
+ fn_recursive_retrieve_sliceable_dims(child)
722
+
723
+ # retrieve number of attention layers
724
+ for module in self.children():
725
+ fn_recursive_retrieve_sliceable_dims(module)
726
+
727
+ num_sliceable_layers = len(sliceable_head_dims)
728
+
729
+ if slice_size == "auto":
730
+ # half the attention head size is usually a good trade-off between
731
+ # speed and memory
732
+ slice_size = [dim // 2 for dim in sliceable_head_dims]
733
+ elif slice_size == "max":
734
+ # make smallest slice possible
735
+ slice_size = num_sliceable_layers * [1]
736
+
737
+ slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size
738
+
739
+ if len(slice_size) != len(sliceable_head_dims):
740
+ raise ValueError(
741
+ f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
742
+ f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
743
+ )
744
+
745
+ for i in range(len(slice_size)):
746
+ size = slice_size[i]
747
+ dim = sliceable_head_dims[i]
748
+ if size is not None and size > dim:
749
+ raise ValueError(f"size {size} has to be smaller or equal to {dim}.")
750
+
751
+ # Recursively walk through all the children.
752
+ # Any child that exposes the set_attention_slice method
753
+ # gets the message
754
+ def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
755
+ if hasattr(module, "set_attention_slice"):
756
+ module.set_attention_slice(slice_size.pop())
757
+
758
+ for child in module.children():
759
+ fn_recursive_set_attention_slice(child, slice_size)
760
+
761
+ reversed_slice_size = list(reversed(slice_size))
762
+ for module in self.children():
763
+ fn_recursive_set_attention_slice(module, reversed_slice_size)
764
+
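# A minimal usage sketch (assuming `unet` is an instance of this class):
#     unet.set_attention_slice("auto")  # halve each sliceable head dim
#     unet.set_attention_slice("max")   # one slice at a time, lowest memory
#     unet.set_attention_slice(2)       # fixed slice size for every layer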
765
+ def _set_gradient_checkpointing(self, module, value=False):
766
+ if hasattr(module, "gradient_checkpointing"):
767
+ module.gradient_checkpointing = value
768
+
769
+ def enable_freeu(self, s1, s2, b1, b2):
770
+ r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.
771
+
772
+ The suffixes after the scaling factors represent the stage blocks where they are being applied.
773
+
774
+ Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
775
+ are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.
776
+
777
+ Args:
778
+ s1 (`float`):
779
+ Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
780
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
781
+ s2 (`float`):
782
+ Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
783
+ mitigate the "oversmoothing effect" in the enhanced denoising process.
784
+ b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
785
+ b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
786
+ """
787
+ for i, upsample_block in enumerate(self.up_blocks):
788
+ setattr(upsample_block, "s1", s1)
789
+ setattr(upsample_block, "s2", s2)
790
+ setattr(upsample_block, "b1", b1)
791
+ setattr(upsample_block, "b2", b2)
792
+
793
+ def disable_freeu(self):
794
+ """Disables the FreeU mechanism."""
795
+ freeu_keys = {"s1", "s2", "b1", "b2"}
796
+ for i, upsample_block in enumerate(self.up_blocks):
797
+ for k in freeu_keys:
798
+ if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
799
+ setattr(upsample_block, k, None)
800
+
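# A minimal usage sketch; the scaling factors below are illustrative only
# (see the FreeU repository for values validated per pipeline):
#     unet.enable_freeu(s1=0.9, s2=0.2, b1=1.2, b2=1.4)
#     ...  # run denoising with FreeU scaling active in the up blocks
#     unet.disable_freeu()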
801
+ def fuse_qkv_projections(self):
802
+ """
803
+ Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query,
804
+ key, value) are fused. For cross-attention modules, key and value projection matrices are fused.
805
+
806
+ <Tip warning={true}>
807
+
808
+ This API is πŸ§ͺ experimental.
809
+
810
+ </Tip>
811
+ """
812
+ self.original_attn_processors = None
813
+
814
+ for _, attn_processor in self.attn_processors.items():
815
+ if "Added" in str(attn_processor.__class__.__name__):
816
+ raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")
817
+
818
+ self.original_attn_processors = self.attn_processors
819
+
820
+ for module in self.modules():
821
+ if isinstance(module, Attention):
822
+ module.fuse_projections(fuse=True)
823
+
824
+ def unfuse_qkv_projections(self):
825
+ """Disables the fused QKV projection if enabled.
826
+
827
+ <Tip warning={true}>
828
+
829
+ This API is πŸ§ͺ experimental.
830
+
831
+ </Tip>
832
+
833
+ """
834
+ if self.original_attn_processors is not None:
835
+ self.set_attn_processor(self.original_attn_processors)
836
+
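# A minimal usage sketch (assuming `unet` has no added-KV attention
# processors, which this experimental API rejects):
#     unet.fuse_qkv_projections()    # fuse QKV (self-attn) and KV (cross-attn)
#     ...                            # run inference with fused projections
#     unet.unfuse_qkv_projections()  # restore the saved processors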
837
+ def forward(
838
+ self,
839
+ sample: torch.FloatTensor,
840
+ timestep: Union[torch.Tensor, float, int],
841
+ encoder_hidden_states: torch.Tensor,
842
+ class_labels: Optional[torch.Tensor] = None,
843
+ timestep_cond: Optional[torch.Tensor] = None,
844
+ attention_mask: Optional[torch.Tensor] = None,
845
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
846
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
847
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
848
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
849
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
850
+ encoder_attention_mask: Optional[torch.Tensor] = None,
851
+ return_dict: bool = True,
852
+ ) -> Union[UNet2DConditionOutput, Tuple]:
853
+ r"""
854
+ The [`UNet2DConditionModel`] forward method.
855
+
856
+ Args:
857
+ sample (`torch.FloatTensor`):
858
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
859
+ timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
860
+ encoder_hidden_states (`torch.FloatTensor`):
861
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
862
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
863
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
864
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
865
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
866
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
867
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
868
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
869
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
870
+ negative values to the attention scores corresponding to "discard" tokens.
871
+ cross_attention_kwargs (`dict`, *optional*):
872
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
873
+ `self.processor` in
874
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
875
+ added_cond_kwargs: (`dict`, *optional*):
876
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
877
+ are passed along to the UNet blocks.
878
+ encoder_attention_mask (`torch.Tensor`):
883
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
884
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
885
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
886
+ return_dict (`bool`, *optional*, defaults to `True`):
887
+ Whether or not to return a [`~models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
888
+ tuple.
889
+ down_block_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
895
+ additional residuals to be added to UNet long skip connections from down blocks to up blocks for
896
+ example from ControlNet side model(s)
897
+ mid_block_additional_residual (`torch.Tensor`, *optional*):
898
+ additional residual to be added to UNet mid block output, for example from ControlNet side model
899
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
900
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
901
+
902
+ Returns:
903
+ [`~models.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
904
+ If `return_dict` is True, an [`~models.unet_2d_condition.UNet2DConditionOutput`] is returned, otherwise
905
+ a `tuple` is returned where the first element is the sample tensor.
906
+ """
907
+
908
+ # By default samples have to be at least a multiple of the overall upsampling factor.
909
+ # The overall upsampling factor is equal to 2 ** (number of upsampling layers).
910
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
911
+ # on the fly if necessary.
912
+ default_overall_up_factor = 2**self.num_upsamplers
913
+
914
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
915
+ forward_upsample_size = False
916
+ upsample_size = None
917
+
918
+ for dim in sample.shape[-2:]:
919
+ if dim % default_overall_up_factor != 0:
920
+ # Forward upsample size to force interpolation output size.
921
+ forward_upsample_size = True
922
+ break
923
+
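# For example, with three upsamplers the factor is 2**3 == 8: a 512x512
# sample divides evenly, while a 500x500 sample sets forward_upsample_size.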
924
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
925
+ # expects mask of shape:
926
+ # [batch, key_tokens]
927
+ # adds singleton query_tokens dimension:
928
+ # [batch, 1, key_tokens]
929
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
930
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
931
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
932
+ if attention_mask is not None:
933
+ # assume that mask is expressed as:
934
+ # (1 = keep, 0 = discard)
935
+ # convert mask into a bias that can be added to attention scores:
936
+ # (keep = +0, discard = -10000.0)
937
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
938
+ attention_mask = attention_mask.unsqueeze(1)
939
+
940
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
941
+ if encoder_attention_mask is not None:
942
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
943
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
944
+
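# For example, a single-batch key mask [[1, 1, 0]] becomes [[0., 0., -10000.]]
# after (1 - mask) * -10000.0, and unsqueeze(1) gives shape [1, 1, 3] so the
# bias broadcasts over query tokens; the -10000.0 drives the masked token's
# post-softmax attention weight to roughly zero.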
945
+ # 0. center input if necessary
946
+ if self.config.center_input_sample:
947
+ sample = 2 * sample - 1.0
948
+
949
+ # 1. time
950
+ timesteps = timestep
951
+ if not torch.is_tensor(timesteps):
952
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
953
+ # This would be a good case for the `match` statement (Python 3.10+)
954
+ is_mps = sample.device.type == "mps"
955
+ if isinstance(timestep, float):
956
+ dtype = torch.float32 if is_mps else torch.float64
957
+ else:
958
+ dtype = torch.int32 if is_mps else torch.int64
959
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
960
+ elif len(timesteps.shape) == 0:
961
+ timesteps = timesteps[None].to(sample.device)
962
+
963
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
964
+ timesteps = timesteps.expand(sample.shape[0])
965
+
966
+ t_emb = self.time_proj(timesteps)
967
+
968
+ # `Timesteps` does not contain any weights and will always return f32 tensors
969
+ # but time_embedding might actually be running in fp16. so we need to cast here.
970
+ # there might be better ways to encapsulate this.
971
+ t_emb = t_emb.to(dtype=sample.dtype)
972
+
973
+ emb = self.time_embedding(t_emb, timestep_cond)
974
+ aug_emb = None
975
+
976
+ if self.class_embedding is not None:
977
+ if class_labels is None:
978
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
979
+
980
+ if self.config.class_embed_type == "timestep":
981
+ class_labels = self.time_proj(class_labels)
982
+
983
+ # `Timesteps` does not contain any weights and will always return f32 tensors
984
+ # there might be better ways to encapsulate this.
985
+ class_labels = class_labels.to(dtype=sample.dtype)
986
+
987
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
988
+
989
+ if self.config.class_embeddings_concat:
990
+ emb = torch.cat([emb, class_emb], dim=-1)
991
+ else:
992
+ emb = emb + class_emb
993
+
994
+ if self.config.addition_embed_type == "text":
995
+ aug_emb = self.add_embedding(encoder_hidden_states)
996
+ elif self.config.addition_embed_type == "text_image":
997
+ # Kandinsky 2.1 - style
998
+ if "image_embeds" not in added_cond_kwargs:
999
+ raise ValueError(
1000
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1001
+ )
1002
+
1003
+ image_embs = added_cond_kwargs.get("image_embeds")
1004
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
1005
+ aug_emb = self.add_embedding(text_embs, image_embs)
1006
+ elif self.config.addition_embed_type == "text_time":
1007
+ # SDXL - style
1008
+ if "text_embeds" not in added_cond_kwargs:
1009
+ raise ValueError(
1010
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
1011
+ )
1012
+ text_embeds = added_cond_kwargs.get("text_embeds")
1013
+ if "time_ids" not in added_cond_kwargs:
1014
+ raise ValueError(
1015
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
1016
+ )
1017
+ time_ids = added_cond_kwargs.get("time_ids")
1018
+ time_embeds = self.add_time_proj(time_ids.flatten())
1019
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
1020
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
1021
+ add_embeds = add_embeds.to(emb.dtype)
1022
+ aug_emb = self.add_embedding(add_embeds)
1023
+ elif self.config.addition_embed_type == "image":
1024
+ # Kandinsky 2.2 - style
1025
+ if "image_embeds" not in added_cond_kwargs:
1026
+ raise ValueError(
1027
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1028
+ )
1029
+ image_embs = added_cond_kwargs.get("image_embeds")
1030
+ aug_emb = self.add_embedding(image_embs)
1031
+ elif self.config.addition_embed_type == "image_hint":
1032
+ # Kandinsky 2.2 - style
1033
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
1034
+ raise ValueError(
1035
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
1036
+ )
1037
+ image_embs = added_cond_kwargs.get("image_embeds")
1038
+ hint = added_cond_kwargs.get("hint")
1039
+ aug_emb, hint = self.add_embedding(image_embs, hint)
1040
+ sample = torch.cat([sample, hint], dim=1)
1041
+
1042
+ emb = emb + aug_emb if aug_emb is not None else emb
1043
+
1044
+ if self.time_embed_act is not None:
1045
+ emb = self.time_embed_act(emb)
1046
+
1047
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
1048
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
1049
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
1050
+ # Kandinsky 2.1 - style
1051
+ if "image_embeds" not in added_cond_kwargs:
1052
+ raise ValueError(
1053
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1054
+ )
1055
+
1056
+ image_embeds = added_cond_kwargs.get("image_embeds")
1057
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
1058
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
1059
+ # Kandinsky 2.2 - style
1060
+ if "image_embeds" not in added_cond_kwargs:
1061
+ raise ValueError(
1062
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1063
+ )
1064
+ image_embeds = added_cond_kwargs.get("image_embeds")
1065
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
1066
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
1067
+ if "image_embeds" not in added_cond_kwargs:
1068
+ raise ValueError(
1069
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1070
+ )
1071
+ image_embeds = added_cond_kwargs.get("image_embeds")
1072
+ image_embeds = self.encoder_hid_proj(image_embeds).to(encoder_hidden_states.dtype)
1073
+ encoder_hidden_states = torch.cat([encoder_hidden_states, image_embeds], dim=1)
1074
+
1075
+ # 2. pre-process
1076
+ sample = self.conv_in(sample)
1077
+
1078
+ # 2.5 GLIGEN position net
1079
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
1080
+ cross_attention_kwargs = cross_attention_kwargs.copy()
1081
+ gligen_args = cross_attention_kwargs.pop("gligen")
1082
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
1083
+
1084
+ # 3. down
1085
+ lora_scale = cross_attention_kwargs.get("scale", 1.0) if cross_attention_kwargs is not None else 1.0
1086
+ if USE_PEFT_BACKEND:
1087
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
1088
+ scale_lora_layers(self, lora_scale)
1089
+
1090
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
1091
+ # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
1092
+ is_adapter = down_intrablock_additional_residuals is not None
1093
+ # maintain backward compatibility for legacy usage, where
1094
+ # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
1095
+ # but can only use one or the other
1096
+ if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
1097
+ deprecate(
1098
+ "T2I should not use down_block_additional_residuals",
1099
+ "1.3.0",
1100
+ "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
1101
+ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
1102
+ for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
1103
+ standard_warn=False,
1104
+ )
1105
+ down_intrablock_additional_residuals = down_block_additional_residuals
1106
+ is_adapter = True
1107
+
1108
+ down_block_res_samples = (sample,)
1109
+ for downsample_block in self.down_blocks:
1110
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1111
+ # For t2i-adapter CrossAttnDownBlock2D
1112
+ additional_residuals = {}
1113
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
1114
+ additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
1115
+
1116
+ sample, res_samples = downsample_block(
1117
+ hidden_states=sample,
1118
+ temb=emb,
1119
+ encoder_hidden_states=encoder_hidden_states,
1120
+ attention_mask=attention_mask,
1121
+ cross_attention_kwargs=cross_attention_kwargs,
1122
+ encoder_attention_mask=encoder_attention_mask,
1123
+ **additional_residuals,
1124
+ )
1125
+ else:
1126
+ sample, res_samples = downsample_block(hidden_states=sample, temb=emb, scale=lora_scale)
1127
+ if is_adapter and len(down_intrablock_additional_residuals) > 0:
1128
+ sample += down_intrablock_additional_residuals.pop(0)
1129
+
1130
+ down_block_res_samples += res_samples
1131
+
1132
+ if is_controlnet:
1133
+ new_down_block_res_samples = ()
1134
+
1135
+ for down_block_res_sample, down_block_additional_residual in zip(
1136
+ down_block_res_samples, down_block_additional_residuals
1137
+ ):
1138
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
1139
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1140
+
1141
+ down_block_res_samples = new_down_block_res_samples
1142
+
1143
+ # 4. mid
1144
+ if self.mid_block is not None:
1145
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
1146
+ sample = self.mid_block(
1147
+ sample,
1148
+ emb,
1149
+ encoder_hidden_states=encoder_hidden_states,
1150
+ attention_mask=attention_mask,
1151
+ cross_attention_kwargs=cross_attention_kwargs,
1152
+ encoder_attention_mask=encoder_attention_mask,
1153
+ )
1154
+ else:
1155
+ sample = self.mid_block(sample, emb)
1156
+
1157
+ # To support T2I-Adapter-XL
1158
+ if (
1159
+ is_adapter
1160
+ and len(down_intrablock_additional_residuals) > 0
1161
+ and sample.shape == down_intrablock_additional_residuals[0].shape
1162
+ ):
1163
+ sample += down_intrablock_additional_residuals.pop(0)
1164
+
1165
+ if is_controlnet:
1166
+ sample = sample + mid_block_additional_residual
1167
+
1168
+ # 5. up
1169
+ for i, upsample_block in enumerate(self.up_blocks):
1170
+ is_final_block = i == len(self.up_blocks) - 1
1171
+
1172
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1173
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1174
+
1175
+ # if we have not reached the final block and need to forward the
1176
+ # upsample size, we do it here
1177
+ if not is_final_block and forward_upsample_size:
1178
+ upsample_size = down_block_res_samples[-1].shape[2:]
1179
+
1180
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1181
+ sample = upsample_block(
1182
+ hidden_states=sample,
1183
+ temb=emb,
1184
+ res_hidden_states_tuple=res_samples,
1185
+ encoder_hidden_states=encoder_hidden_states,
1186
+ cross_attention_kwargs=cross_attention_kwargs,
1187
+ upsample_size=upsample_size,
1188
+ attention_mask=attention_mask,
1189
+ encoder_attention_mask=encoder_attention_mask,
1190
+ )
1191
+ else:
1192
+ sample = upsample_block(
1193
+ hidden_states=sample,
1194
+ temb=emb,
1195
+ res_hidden_states_tuple=res_samples,
1196
+ upsample_size=upsample_size,
1197
+ scale=lora_scale,
1198
+ )
1199
+
1200
+ # 6. post-process
1201
+ if self.conv_norm_out:
1202
+ sample = self.conv_norm_out(sample)
1203
+ sample = self.conv_act(sample)
1204
+ sample = self.conv_out(sample)
1205
+
1206
+ if USE_PEFT_BACKEND:
1207
+ # remove `lora_scale` from each PEFT layer
1208
+ unscale_lora_layers(self, lora_scale)
1209
+
1210
+ if not return_dict:
1211
+ return (sample,)
1212
+
1213
+ return UNet2DConditionOutput(sample=sample)
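A minimal forward-pass sketch of the model defined above, under assumed shapes (a 4-channel 64x64 latent and 77-token text embeddings) and an illustrative checkpoint; the upstream diffusers UNet2DConditionModel shares this forward signature:

    import torch
    from diffusers import UNet2DConditionModel

    unet = UNet2DConditionModel.from_pretrained(
        "stabilityai/stable-diffusion-2", subfolder="unet"
    )
    latent = torch.randn(1, unet.config.in_channels, 64, 64)
    text_emb = torch.randn(1, 77, unet.config.cross_attention_dim)
    out = unet(latent, timestep=10, encoder_hidden_states=text_emb)
    print(out.sample.shape)  # expected: torch.Size([1, 4, 64, 64])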
geowizard/run_infer.py ADDED
@@ -0,0 +1,236 @@
1
+ # Adapted from Marigold: https://github.com/prs-eth/Marigold
2
+
3
+ import argparse
4
+ import os
5
+ import logging
6
+
7
+ import numpy as np
8
+ import torch
9
+ from PIL import Image
10
+ from tqdm.auto import tqdm
11
+ import glob
12
+ import json
13
+ import cv2
14
+
15
+ import sys
16
+ from models.geowizard_pipeline import DepthNormalEstimationPipeline
17
+ from utils.seed_all import seed_all
18
+ import matplotlib.pyplot as plt
19
+ from utils.depth2normal import *
20
+
21
+ from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
22
+ from models.unet_2d_condition import UNet2DConditionModel
23
+
24
+ from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection
25
+ import torchvision.transforms.functional as TF
26
+ from torchvision.transforms import InterpolationMode
27
+
28
+ if __name__=="__main__":
29
+
30
+ logging.basicConfig(level=logging.INFO)
31
+
32
+ '''Set the Args'''
33
+ parser = argparse.ArgumentParser(
34
+ description="Run MonoDepthNormal Estimation using Stable Diffusion."
35
+ )
36
+ parser.add_argument(
37
+ "--pretrained_model_path",
38
+ type=str,
39
+ default='lemonaddie/geowizard',
40
+ help="pretrained model path from hugging face or local dir",
41
+ )
42
+ parser.add_argument(
43
+ "--input_dir", type=str, required=True, help="Input directory."
44
+ )
45
+
46
+ parser.add_argument(
47
+ "--output_dir", type=str, required=True, help="Output directory."
48
+ )
49
+ parser.add_argument(
50
+ "--domain",
51
+ type=str,
52
+ default='indoor',
53
+ required=True,
54
+ help="domain prediction",
55
+ )
56
+
57
+ # inference setting
58
+ parser.add_argument(
59
+ "--denoise_steps",
60
+ type=int,
61
+ default=10,
62
+ help="Diffusion denoising steps, more steps results in higher accuracy but slower inference speed.",
63
+ )
64
+ parser.add_argument(
65
+ "--ensemble_size",
66
+ type=int,
67
+ default=10,
68
+ help="Number of predictions to be ensembled, more inference gives better results but runs slower.",
69
+ )
70
+ parser.add_argument(
71
+ "--half_precision",
72
+ action="store_true",
73
+ help="Run with half-precision (16-bit float), might lead to suboptimal result.",
74
+ )
75
+
76
+ # resolution setting
77
+ parser.add_argument(
78
+ "--processing_res",
79
+ type=int,
80
+ default=768,
81
+ help="Maximum resolution of processing. 0 for using input image resolution. Default: 768.",
82
+ )
83
+ parser.add_argument(
84
+ "--output_processing_res",
85
+ action="store_true",
86
+ help="When input is resized, out put depth at resized operating resolution. Default: False.",
87
+ )
88
+
89
+ # depth map colormap
90
+ parser.add_argument(
91
+ "--color_map",
92
+ type=str,
93
+ default="Spectral",
94
+ help="Colormap used to render depth predictions.",
95
+ )
96
+ # other settings
97
+ parser.add_argument("--seed", type=int, default=None, help="Random seed.")
98
+ parser.add_argument(
99
+ "--batch_size",
100
+ type=int,
101
+ default=0,
102
+ help="Inference batch size. Default: 0 (will be set automatically).",
103
+ )
104
+
105
+ args = parser.parse_args()
106
+
107
+ checkpoint_path = args.pretrained_model_path
108
+ output_dir = args.output_dir
109
+ denoise_steps = args.denoise_steps
110
+ ensemble_size = args.ensemble_size
111
+
112
+ if ensemble_size>15:
113
+ logging.warning("long ensemble steps, low speed..")
114
+
115
+ half_precision = args.half_precision
116
+
117
+ processing_res = args.processing_res
118
+ match_input_res = not args.output_processing_res
119
+ domain = args.domain
120
+
121
+ color_map = args.color_map
122
+ seed = args.seed
123
+ batch_size = args.batch_size
124
+
125
+ if batch_size==0:
126
+ batch_size = 1 # set default batchsize
127
+
128
+ # -------------------- Preparation --------------------
129
+ # Random seed
130
+ if seed is None:
131
+ import time
132
+ seed = int(time.time())
133
+ seed_all(seed)
134
+
135
+ # Output directories
136
+ output_dir_color = os.path.join(output_dir, "depth_colored")
137
+ output_dir_npy = os.path.join(output_dir, "depth_npy")
138
+ output_dir_normal_npy = os.path.join(output_dir, "normal_npy")
139
+ output_dir_normal_color = os.path.join(output_dir, "normal_colored")
140
+ os.makedirs(output_dir, exist_ok=True)
141
+ os.makedirs(output_dir_color, exist_ok=True)
142
+ os.makedirs(output_dir_npy, exist_ok=True)
143
+ os.makedirs(output_dir_normal_npy, exist_ok=True)
144
+ os.makedirs(output_dir_normal_color, exist_ok=True)
145
+ logging.info(f"output dir = {output_dir}")
146
+
147
+ # -------------------- Device --------------------
148
+ if torch.cuda.is_available():
149
+ device = torch.device("cuda")
150
+ else:
151
+ device = torch.device("cpu")
152
+ logging.warning("CUDA is not available. Running on CPU will be slow.")
153
+ logging.info(f"device = {device}")
154
+
155
+ # -------------------- Data --------------------
156
+ input_dir = args.input_dir
157
+ test_files = sorted(os.listdir(input_dir))
158
+ n_images = len(test_files)
159
+ if n_images > 0:
160
+ logging.info(f"Found {n_images} images")
161
+ else:
162
+ logging.error(f"No image found")
163
+ exit(1)
164
+
165
+ # -------------------- Model --------------------
166
+ if half_precision:
167
+ dtype = torch.float16
168
+ logging.info(f"Running with half precision ({dtype}).")
169
+ else:
170
+ dtype = torch.float32
171
+
172
+ # declare a pipeline
173
+ pipe = DepthNormalEstimationPipeline.from_pretrained(checkpoint_path, torch_dtype=dtype)
174
+
175
+ logging.info("loading pipeline whole successfully.")
176
+
177
+ try:
178
+ pipe.enable_xformers_memory_efficient_attention()
179
+ except:
180
+ pass # run without xformers
181
+
182
+ pipe = pipe.to(device)
183
+
184
+ # -------------------- Inference and saving --------------------
185
+ with torch.no_grad():
186
+ os.makedirs(output_dir, exist_ok=True)
187
+
188
+ for test_file in tqdm(test_files, desc="Estimating Depth & Normal", leave=True):
189
+ rgb_path = os.path.join(input_dir, test_file)
190
+
191
+ # Read input image
192
+ input_image = Image.open(rgb_path)
193
+
194
+ # predict the depth here
195
+ pipe_out = pipe(input_image,
196
+ denoising_steps = denoise_steps,
197
+ ensemble_size= ensemble_size,
198
+ processing_res = processing_res,
199
+ match_input_res = match_input_res,
200
+ domain = domain,
201
+ color_map = color_map,
202
+ show_progress_bar = True,
203
+ )
204
+
205
+ depth_pred: np.ndarray = pipe_out.depth_np
206
+ depth_colored: Image.Image = pipe_out.depth_colored
207
+ normal_pred: np.ndarray = pipe_out.normal_np
208
+ normal_colored: Image.Image = pipe_out.normal_colored
209
+
210
+ # Save as npy
211
+ rgb_name_base = os.path.splitext(os.path.basename(rgb_path))[0]
212
+ pred_name_base = rgb_name_base + "_pred"
213
+ npy_save_path = os.path.join(output_dir_npy, f"{pred_name_base}.npy")
214
+ if os.path.exists(npy_save_path):
215
+ logging.warning(f"Existing file: '{npy_save_path}' will be overwritten")
216
+ np.save(npy_save_path, depth_pred)
217
+
218
+ normal_npy_save_path = os.path.join(output_dir_normal_npy, f"{pred_name_base}.npy")
219
+ if os.path.exists(normal_npy_save_path):
220
+ logging.warning(f"Existing file: '{normal_npy_save_path}' will be overwritten")
221
+ np.save(normal_npy_save_path, normal_pred)
222
+
223
+ # Colorize
224
+ depth_colored_save_path = os.path.join(output_dir_color, f"{pred_name_base}_colored.png")
225
+ if os.path.exists(depth_colored_save_path):
226
+ logging.warning(
227
+ f"Existing file: '{depth_colored_save_path}' will be overwritten"
228
+ )
229
+ depth_colored.save(depth_colored_save_path)
230
+
231
+ normal_colored_save_path = os.path.join(output_dir_normal_color, f"{pred_name_base}_colored.png")
232
+ if os.path.exists(normal_colored_save_path):
233
+ logging.warning(
234
+ f"Existing file: '{normal_colored_save_path}' will be overwritten"
235
+ )
236
+ normal_colored.save(normal_colored_save_path)
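A hypothetical invocation of the script above (flag values are illustrative; only --input_dir, --output_dir and --domain are required):

    python run_infer.py --input_dir ./input --output_dir ./output \
        --domain indoor --denoise_steps 10 --ensemble_size 3

Predictions land in depth_npy/, depth_colored/, normal_npy/ and normal_colored/ under --output_dir; files with matching names are overwritten with a logged warning.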
geowizard/run_infer_object.py ADDED
@@ -0,0 +1,244 @@
1
+ # Adapted from Marigold: https://github.com/prs-eth/Marigold
2
+
3
+ import argparse
4
+ import os
5
+ import logging
6
+
7
+ import numpy as np
8
+ import torch
9
+ from PIL import Image
10
+ from tqdm.auto import tqdm
11
+ import glob
12
+ import json
13
+ import cv2
14
+
15
+ import sys
16
+ from models.geowizard_object_pipeline import DepthNormalEstimationPipeline
17
+ from utils.seed_all import seed_all
18
+ import matplotlib.pyplot as plt
19
+ from utils.depth2normal import *
20
+
21
+ from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
22
+ from models.unet_2d_condition import UNet2DConditionModel
23
+
24
+ from transformers import CLIPTextModel, CLIPTokenizer
25
+
26
+ if __name__=="__main__":
27
+
28
+ logging.basicConfig(level=logging.INFO)
29
+
30
+ '''Set the Args'''
31
+ parser = argparse.ArgumentParser(
32
+ description="Run MonoDepthNormal Estimation using Stable Diffusion."
33
+ )
34
+ parser.add_argument(
35
+ "--pretrained_model_path",
36
+ type=str,
37
+ default='lemonaddie/geowizard',
38
+ help="pretrained model path from hugging face or local dir",
39
+ )
40
+ parser.add_argument(
41
+ "--input_dir", type=str, required=True, help="Input directory."
42
+ )
43
+
44
+ parser.add_argument(
45
+ "--output_dir", type=str, required=True, help="Output directory."
46
+ )
47
+ parser.add_argument(
48
+ "--domain",
49
+ type=str,
50
+ default='object',
51
+ help="domain prediction",
52
+ )
53
+
54
+ # inference setting
55
+ parser.add_argument(
56
+ "--denoise_steps",
57
+ type=int,
58
+ default=10,
59
+ help="Diffusion denoising steps, more steps results in higher accuracy but slower inference speed.",
60
+ )
61
+ parser.add_argument(
62
+ "--ensemble_size",
63
+ type=int,
64
+ default=10,
65
+ help="Number of predictions to be ensembled, more inference gives better results but runs slower.",
66
+ )
67
+ parser.add_argument(
68
+ "--half_precision",
69
+ action="store_true",
70
+ help="Run with half-precision (16-bit float), might lead to suboptimal result.",
71
+ )
72
+
73
+ # resolution setting
74
+ parser.add_argument(
75
+ "--processing_res",
76
+ type=int,
77
+ default=768,
78
+ help="Maximum resolution of processing. 0 for using input image resolution. Default: 768.",
79
+ )
80
+ parser.add_argument(
81
+ "--output_processing_res",
82
+ action="store_true",
83
+ help="When input is resized, out put depth at resized operating resolution. Default: False.",
84
+ )
85
+
86
+ # depth map colormap
87
+ parser.add_argument(
88
+ "--color_map",
89
+ type=str,
90
+ default="Spectral",
91
+ help="Colormap used to render depth predictions.",
92
+ )
93
+ # other settings
94
+ parser.add_argument("--seed", type=int, default=None, help="Random seed.")
95
+ parser.add_argument(
96
+ "--batch_size",
97
+ type=int,
98
+ default=0,
99
+ help="Inference batch size. Default: 0 (will be set automatically).",
100
+ )
101
+
102
+ args = parser.parse_args()
103
+
104
+ checkpoint_path = args.pretrained_model_path
105
+ output_dir = args.output_dir
106
+ denoise_steps = args.denoise_steps
107
+ ensemble_size = args.ensemble_size
108
+
109
+ if ensemble_size>15:
110
+ logging.warning("long ensemble steps, low speed..")
111
+
112
+ half_precision = args.half_precision
113
+
114
+ processing_res = args.processing_res
115
+ match_input_res = not args.output_processing_res
116
+ domain = args.domain
117
+
118
+ color_map = args.color_map
119
+ seed = args.seed
120
+ batch_size = args.batch_size
121
+
122
+ if batch_size==0:
123
+ batch_size = 1 # set default batch size
124
+
125
+ # -------------------- Preparation --------------------
126
+ # Random seed
127
+ if seed is None:
128
+ import time
129
+ seed = int(time.time())
130
+ seed_all(seed)
131
+
132
+ # Output directories
133
+ output_dir_color = os.path.join(output_dir, "depth_colored")
134
+ output_dir_npy = os.path.join(output_dir, "depth_npy")
135
+ output_dir_normal_npy = os.path.join(output_dir, "normal_npy")
136
+ output_dir_normal_color = os.path.join(output_dir, "normal_colored")
137
+ os.makedirs(output_dir, exist_ok=True)
138
+ os.makedirs(output_dir_color, exist_ok=True)
139
+ os.makedirs(output_dir_npy, exist_ok=True)
140
+ os.makedirs(output_dir_normal_npy, exist_ok=True)
141
+ os.makedirs(output_dir_normal_color, exist_ok=True)
142
+ logging.info(f"output dir = {output_dir}")
143
+
144
+ # -------------------- Device --------------------
145
+ if torch.cuda.is_available():
146
+ device = torch.device("cuda")
147
+ else:
148
+ device = torch.device("cpu")
149
+ logging.warning("CUDA is not available. Running on CPU will be slow.")
150
+ logging.info(f"device = {device}")
151
+
152
+ # -------------------- Data --------------------
153
+ input_dir = args.input_dir
154
+ test_files = sorted(os.listdir(input_dir))
155
+ n_images = len(test_files)
156
+ if n_images > 0:
157
+ logging.info(f"Found {n_images} images")
158
+ else:
159
+ logging.error(f"No image found")
160
+ exit(1)
161
+
162
+ # -------------------- Model --------------------
163
+ if half_precision:
164
+ dtype = torch.float16
165
+ logging.info(f"Running with half precision ({dtype}).")
166
+ else:
167
+ dtype = torch.float32
168
+
169
+ # declare a pipeline
170
+ stable_diffusion_repo_path = "stabilityai/stable-diffusion-2"
171
+ vae = AutoencoderKL.from_pretrained(stable_diffusion_repo_path, subfolder='vae')
172
+ text_encoder = CLIPTextModel.from_pretrained(stable_diffusion_repo_path, subfolder='text_encoder')
173
+ scheduler = DDIMScheduler.from_pretrained(stable_diffusion_repo_path, subfolder='scheduler')
174
+ tokenizer = CLIPTokenizer.from_pretrained(stable_diffusion_repo_path, subfolder='tokenizer')
175
+ unet = UNet2DConditionModel.from_pretrained(checkpoint_path, subfolder='unet_object')
176
+
177
+ pipe = DepthNormalEstimationPipeline(vae=vae,
178
+ text_encoder=text_encoder,
179
+ tokenizer=tokenizer,
180
+ unet=unet,
181
+ scheduler=scheduler)
182
+
183
+ logging.info("loading pipeline whole successfully.")
184
+
185
+ try:
186
+ pipe.enable_xformers_memory_efficient_attention()
187
+ except Exception:
188
+ pass # run without xformers
189
+
190
+ pipe = pipe.to(device)
191
+
192
+ # -------------------- Inference and saving --------------------
193
+ with torch.no_grad():
194
+ os.makedirs(output_dir, exist_ok=True)
195
+
196
+ for test_file in tqdm(test_files, desc="Estimating Depth & Normal", leave=True):
197
+ rgb_path = os.path.join(input_dir, test_file)
198
+
199
+ # Read input image
200
+ input_image = Image.open(rgb_path)
201
+
202
+ # predict the depth here
203
+ pipe_out = pipe(input_image,
204
+ denoising_steps = denoise_steps,
205
+ ensemble_size= ensemble_size,
206
+ processing_res = processing_res,
207
+ match_input_res = match_input_res,
208
+ domain = domain,
209
+ color_map = color_map,
210
+ show_progress_bar = True,
211
+ )
212
+
213
+ depth_pred: np.ndarray = pipe_out.depth_np
214
+ depth_colored: Image.Image = pipe_out.depth_colored
215
+ normal_pred: np.ndarray = pipe_out.normal_np
216
+ normal_colored: Image.Image = pipe_out.normal_colored
217
+
218
+ # Save as npy
219
+ rgb_name_base = os.path.splitext(os.path.basename(rgb_path))[0]
220
+ pred_name_base = rgb_name_base + "_pred"
221
+ npy_save_path = os.path.join(output_dir_npy, f"{pred_name_base}.npy")
222
+ if os.path.exists(npy_save_path):
223
+ logging.warning(f"Existing file: '{npy_save_path}' will be overwritten")
224
+ np.save(npy_save_path, depth_pred)
225
+
226
+ normal_npy_save_path = os.path.join(output_dir_normal_npy, f"{pred_name_base}.npy")
227
+ if os.path.exists(normal_npy_save_path):
228
+ logging.warning(f"Existing file: '{normal_npy_save_path}' will be overwritten")
229
+ np.save(normal_npy_save_path, normal_pred)
230
+
231
+ # Colorize
232
+ depth_colored_save_path = os.path.join(output_dir_color, f"{pred_name_base}_colored.png")
233
+ if os.path.exists(depth_colored_save_path):
234
+ logging.warning(
235
+ f"Existing file: '{depth_colored_save_path}' will be overwritten"
236
+ )
237
+ depth_colored.save(depth_colored_save_path)
238
+
239
+ normal_colored_save_path = os.path.join(output_dir_normal_color, f"{pred_name_base}_colored.png")
240
+ if os.path.exists(normal_colored_save_path):
241
+ logging.warning(
242
+ f"Existing file: '{normal_colored_save_path}' will be overwritten"
243
+ )
244
+ normal_colored.save(normal_colored_save_path)
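For reference, a minimal sketch of driving the same inference flow programmatically rather than through argparse; the checkpoint directory, image path, and `domain` value are placeholders, not part of this commit:

import torch
from PIL import Image
from diffusers import AutoencoderKL, DDIMScheduler, UNet2DConditionModel
from transformers import CLIPTextModel, CLIPTokenizer
from models.geowizard_pipeline import DepthNormalEstimationPipeline

repo = "stabilityai/stable-diffusion-2"
pipe = DepthNormalEstimationPipeline(
    vae=AutoencoderKL.from_pretrained(repo, subfolder="vae"),
    text_encoder=CLIPTextModel.from_pretrained(repo, subfolder="text_encoder"),
    tokenizer=CLIPTokenizer.from_pretrained(repo, subfolder="tokenizer"),
    unet=UNet2DConditionModel.from_pretrained("weights/Geowizard", subfolder="unet_object"),  # placeholder checkpoint dir
    scheduler=DDIMScheduler.from_pretrained(repo, subfolder="scheduler"),
).to("cuda" if torch.cuda.is_available() else "cpu")

with torch.no_grad():
    out = pipe(Image.open("example.jpg"),            # placeholder input image
               denoising_steps=10, ensemble_size=3,
               processing_res=768, match_input_res=True,
               domain="object", color_map="Spectral",
               show_progress_bar=True)
out.depth_colored.save("example_depth_colored.png")
out.normal_colored.save("example_normal_colored.png")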
geowizard/utils/README.txt ADDED
@@ -0,0 +1,2 @@
1
+ Some files are adapted from Marigold: https://github.com/prs-eth/Marigold.
2
+ Thanks for their great work!
geowizard/utils/batch_size.py ADDED
@@ -0,0 +1,63 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import torch
4
+ import math
5
+
6
+
7
+ # Search table for suggested max. inference batch size
8
+ bs_search_table = [
9
+ # tested on A100-PCIE-80GB
10
+ {"res": 768, "total_vram": 79, "bs": 35, "dtype": torch.float32},
11
+ {"res": 1024, "total_vram": 79, "bs": 20, "dtype": torch.float32},
12
+ # tested on A100-PCIE-40GB
13
+ {"res": 768, "total_vram": 39, "bs": 15, "dtype": torch.float32},
14
+ {"res": 1024, "total_vram": 39, "bs": 8, "dtype": torch.float32},
15
+ {"res": 768, "total_vram": 39, "bs": 30, "dtype": torch.float16},
16
+ {"res": 1024, "total_vram": 39, "bs": 15, "dtype": torch.float16},
17
+ # tested on RTX3090, RTX4090
18
+ {"res": 512, "total_vram": 23, "bs": 20, "dtype": torch.float32},
19
+ {"res": 768, "total_vram": 23, "bs": 7, "dtype": torch.float32},
20
+ {"res": 1024, "total_vram": 23, "bs": 3, "dtype": torch.float32},
21
+ {"res": 512, "total_vram": 23, "bs": 40, "dtype": torch.float16},
22
+ {"res": 768, "total_vram": 23, "bs": 18, "dtype": torch.float16},
23
+ {"res": 1024, "total_vram": 23, "bs": 10, "dtype": torch.float16},
24
+ # tested on GTX1080Ti
25
+ {"res": 512, "total_vram": 10, "bs": 5, "dtype": torch.float32},
26
+ {"res": 768, "total_vram": 10, "bs": 2, "dtype": torch.float32},
27
+ {"res": 512, "total_vram": 10, "bs": 10, "dtype": torch.float16},
28
+ {"res": 768, "total_vram": 10, "bs": 5, "dtype": torch.float16},
29
+ {"res": 1024, "total_vram": 10, "bs": 3, "dtype": torch.float16},
30
+ ]
31
+
32
+
33
+ def find_batch_size(ensemble_size: int, input_res: int, dtype: torch.dtype) -> int:
34
+ """
35
+ Automatically search for suitable operating batch size.
36
+
37
+ Args:
38
+ ensemble_size (`int`):
39
+ Number of predictions to be ensembled.
40
+ input_res (`int`):
41
+ Operating resolution of the input image.
+ dtype (`torch.dtype`):
+ Inference data type; only table entries with a matching dtype are considered.
42
+
43
+ Returns:
44
+ `int`: Operating batch size.
45
+ """
46
+ if not torch.cuda.is_available():
47
+ return 1
48
+
49
+ total_vram = torch.cuda.mem_get_info()[1] / 1024.0**3
50
+ filtered_bs_search_table = [s for s in bs_search_table if s["dtype"] == dtype]
51
+ for settings in sorted(
52
+ filtered_bs_search_table,
53
+ key=lambda k: (k["res"], -k["total_vram"]),
54
+ ):
55
+ if input_res <= settings["res"] and total_vram >= settings["total_vram"]:
56
+ bs = settings["bs"]
57
+ if bs > ensemble_size:
58
+ bs = ensemble_size
59
+ elif bs > math.ceil(ensemble_size / 2) and bs < ensemble_size:
60
+ bs = math.ceil(ensemble_size / 2)
61
+ return bs
62
+
63
+ return 1
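A quick usage sketch of the lookup above (illustrative values; the import path is assumed from this repo layout). On a 24 GB GPU at 768 px in fp16 the table suggests a batch size of 18, which is then clamped to the ensemble size:

import torch
from geowizard.utils.batch_size import find_batch_size  # assumed import path

bs = find_batch_size(ensemble_size=10, input_res=768, dtype=torch.float16)
print(bs)  # 10 on such a GPU; always 1 when CUDA is unavailable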
geowizard/utils/colormap.py ADDED
@@ -0,0 +1,45 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import numpy as np
4
+ import cv2
5
+
6
+ def kitti_colormap(disparity, maxval=-1):
7
+ """
8
+ A utility function to reproduce KITTI fake colormap
9
+ Arguments:
10
+ - disparity: numpy float32 array of dimension HxW
11
+ - maxval: maximum disparity value for normalization (if equal to -1, the maximum value in disparity will be used)
12
+
13
+ Returns a numpy uint8 array of shape HxWx3.
14
+ """
15
+ if maxval < 0:
16
+ maxval = np.max(disparity)
17
+
18
+ colormap = np.asarray([[0,0,0,114],[0,0,1,185],[1,0,0,114],[1,0,1,174],[0,1,0,114],[0,1,1,185],[1,1,0,114],[1,1,1,0]])
19
+ weights = np.asarray([8.771929824561404,5.405405405405405,8.771929824561404,5.747126436781609,8.771929824561404,5.405405405405405,8.771929824561404,0])
20
+ cumsum = np.asarray([0,0.114,0.299,0.413,0.587,0.701,0.8859999999999999,0.9999999999999999])
21
+
22
+ colored_disp = np.zeros([disparity.shape[0], disparity.shape[1], 3])
23
+ values = np.expand_dims(np.minimum(np.maximum(disparity/maxval, 0.), 1.), -1)
24
+ bins = np.repeat(np.repeat(np.expand_dims(np.expand_dims(cumsum,axis=0),axis=0), disparity.shape[1], axis=1), disparity.shape[0], axis=0)
25
+ diffs = np.where((np.repeat(values, 8, axis=-1) - bins) > 0, -1000, (np.repeat(values, 8, axis=-1) - bins))
26
+ index = np.argmax(diffs, axis=-1)-1
27
+
28
+ w = 1-(values[:,:,0]-cumsum[index])*np.asarray(weights)[index]
29
+
30
+
31
+ colored_disp[:,:,2] = (w*colormap[index][:,:,0] + (1.-w)*colormap[index+1][:,:,0])
32
+ colored_disp[:,:,1] = (w*colormap[index][:,:,1] + (1.-w)*colormap[index+1][:,:,1])
33
+ colored_disp[:,:,0] = (w*colormap[index][:,:,2] + (1.-w)*colormap[index+1][:,:,2])
34
+
35
+ return (colored_disp*np.expand_dims((disparity>0),-1)*255).astype(np.uint8)
36
+
37
+ def read_16bit_gt(path):
38
+ """
39
+ A utility function to read KITTI 16bit gt
40
+ Arguments:
41
+ - path: filepath
42
+ Returns a numpy float32 array of shape HxW.
43
+ """
44
+ gt = cv2.imread(path,-1).astype(np.float32)/256.
45
+ return gt
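A brief sketch of the two helpers; the disparity file path is hypothetical:

import cv2
from geowizard.utils.colormap import kitti_colormap, read_16bit_gt  # assumed import path

disp = read_16bit_gt("disp_occ_0/000000_10.png")  # hypothetical KITTI 16-bit ground-truth file
vis = kitti_colormap(disp)                        # HxWx3 uint8; channels ordered for cv2.imwrite
cv2.imwrite("disp_colored.png", vis)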
geowizard/utils/common.py ADDED
@@ -0,0 +1,42 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import json
4
+ import yaml
5
+ import logging
6
+ import os
7
+ import numpy as np
8
+ import sys
9
+
10
+ def load_loss_scheme(loss_config):
11
+ with open(loss_config, 'r') as f:
12
+ loss_json = yaml.safe_load(f)
13
+ return loss_json
14
+
15
+
16
+ DEBUG = 0
17
+ logger = logging.getLogger()
18
+
19
+
20
+ if DEBUG:
21
+ #coloredlogs.install(level='DEBUG')
22
+ logger.setLevel(logging.DEBUG)
23
+ else:
24
+ #coloredlogs.install(level='INFO')
25
+ logger.setLevel(logging.INFO)
26
+
27
+
28
+ strhdlr = logging.StreamHandler()
29
+ logger.addHandler(strhdlr)
30
+ formatter = logging.Formatter('%(asctime)s [%(filename)s:%(lineno)d] %(levelname)s %(message)s')
31
+ strhdlr.setFormatter(formatter)
32
+
33
+
34
+
35
+ def count_parameters(model):
36
+ return sum(p.numel() for p in model.parameters() if p.requires_grad)
37
+
38
+ def check_path(path):
39
+ if not os.path.exists(path):
40
+ os.makedirs(path, exist_ok=True)
41
+
42
+
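A short usage sketch of the helpers above; the YAML path is hypothetical:

import torch.nn as nn
from geowizard.utils.common import count_parameters, check_path, load_loss_scheme  # assumed import path

model = nn.Linear(10, 4)
print(count_parameters(model))  # 44 trainable parameters (40 weights + 4 biases)
check_path("outputs/run1")      # creates the directory if it does not exist
# loss_cfg = load_loss_scheme("configs/loss.yaml")  # hypothetical config file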
geowizard/utils/dataset_configuration.py ADDED
@@ -0,0 +1,81 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import torch
4
+ import torch.nn as nn
5
+ import torch.nn.functional as F
6
+ import numpy as np
7
+ import sys
8
+ sys.path.append("..")
9
+
10
+ from dataloader.mix_loader import MixDataset
11
+ from torch.utils.data import DataLoader
12
+ from dataloader import transforms
13
+ import os
14
+
15
+
16
+ # Get Dataset Here
17
+ def prepare_dataset(data_dir=None,
18
+ batch_size=1,
19
+ test_batch=1,
20
+ datathread=4,
21
+ logger=None):
22
+
23
+ # set the config parameters
24
+ dataset_config_dict = dict()
25
+
26
+ train_dataset = MixDataset(data_dir=data_dir)
27
+
28
+ img_height, img_width = train_dataset.get_img_size()
29
+
30
+ # number of dataloader workers; the 'datathread' env var below overrides the argument
31
+ if os.environ.get('datathread') is not None:
32
+ datathread = int(os.environ.get('datathread'))
33
+
34
+ if logger is not None:
35
+ logger.info("Use %d processes to load data..." % datathread)
36
+
37
+ train_loader = DataLoader(train_dataset, batch_size = batch_size, \
38
+ shuffle = True, num_workers = datathread, \
39
+ pin_memory = True)
40
+
41
+ num_batches_per_epoch = len(train_loader)
42
+
43
+ dataset_config_dict['num_batches_per_epoch'] = num_batches_per_epoch
44
+ dataset_config_dict['img_size'] = (img_height,img_width)
45
+
46
+ return train_loader, dataset_config_dict
47
+
48
+ def depth_scale_shift_normalization(depth):
49
+
50
+ bsz = depth.shape[0]
51
+
52
+ depth_ = depth[:,0,:,:].reshape(bsz,-1).cpu().numpy()
53
+ min_value = torch.from_numpy(np.percentile(a=depth_,q=2,axis=1)).to(depth)[...,None,None,None]
54
+ max_value = torch.from_numpy(np.percentile(a=depth_,q=98,axis=1)).to(depth)[...,None,None,None]
55
+
56
+ normalized_depth = ((depth - min_value)/(max_value-min_value+1e-5) - 0.5) * 2
57
+ normalized_depth = torch.clip(normalized_depth, -1., 1.)
58
+
59
+ return normalized_depth
60
+
61
+
62
+
63
+ def resize_max_res_tensor(input_tensor, mode, recom_resolution=768):
64
+ assert input_tensor.shape[1]==3
65
+ original_H, original_W = input_tensor.shape[2:]
66
+ downscale_factor = min(recom_resolution/original_H, recom_resolution/original_W)
67
+
68
+ if mode == 'normal':
69
+ resized_input_tensor = F.interpolate(input_tensor,
70
+ scale_factor=downscale_factor,
71
+ mode='nearest')
72
+ else:
73
+ resized_input_tensor = F.interpolate(input_tensor,
74
+ scale_factor=downscale_factor,
75
+ mode='bilinear',
76
+ align_corners=False)
77
+
78
+ if mode == 'depth':
79
+ return resized_input_tensor / downscale_factor
80
+ else:
81
+ return resized_input_tensor
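Note that `resize_max_res_tensor` divides depth values by the downscale factor so they stay consistent with the rescaled pixel grid. A sketch of the resize helper alone (the dataloader path needs the repo's training data, so it is not exercised here; import path assumed):

import torch
from geowizard.utils.dataset_configuration import resize_max_res_tensor  # assumed import path

rgb = torch.rand(1, 3, 1080, 1920)
resized = resize_max_res_tensor(rgb, mode='rgb')  # any mode other than 'normal' uses bilinear
print(resized.shape)                              # torch.Size([1, 3, 432, 768]); longest edge becomes 768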
geowizard/utils/de_normalized.py ADDED
@@ -0,0 +1,31 @@
1
+ import numpy as np
2
+ from scipy.optimize import least_squares
3
+ import torch
4
+
5
+ def align_scale_shift(pred, target, clip_max):
6
+ mask = (target > 0) & (target < clip_max)
7
+ if mask.sum() > 10:
8
+ target_mask = target[mask]
9
+ pred_mask = pred[mask]
10
+ scale, shift = np.polyfit(pred_mask, target_mask, deg=1)
11
+ return scale, shift
12
+ else:
13
+ return 1, 0
14
+
15
+ def align_scale(pred: torch.Tensor, target: torch.Tensor):
16
+ mask = target > 0
17
+ if torch.sum(mask) > 10:
18
+ scale = torch.median(target[mask]) / (torch.median(pred[mask]) + 1e-8)
19
+ else:
20
+ scale = 1
21
+ pred_scale = pred * scale
22
+ return pred_scale, scale
23
+
24
+ def align_shift(pred: torch.Tensor, target: torch.Tensor):
25
+ mask = target > 0
26
+ if torch.sum(mask) > 10:
27
+ shift = torch.median(target[mask]) - (torch.median(pred[mask]) + 1e-8)
28
+ else:
29
+ shift = 0
30
+ pred_shift = pred + shift
31
+ return pred_shift, shift
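These helpers recover the affine ambiguity of a relative prediction against metric ground truth; a sketch on synthetic data:

import numpy as np
from geowizard.utils.de_normalized import align_scale_shift  # assumed import path

pred = np.random.rand(240, 320).astype(np.float32)
target = pred * 2.5 + 0.3                                     # ground truth related by an affine map
scale, shift = align_scale_shift(pred, target, clip_max=80.0)
print(scale, shift)                                           # ~2.5, ~0.3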
geowizard/utils/depth2normal.py ADDED
@@ -0,0 +1,186 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import pickle
4
+ import os
5
+ import h5py
6
+ import numpy as np
7
+ import cv2
8
+ import torch
9
+ import torch.nn as nn
10
+ import glob
11
+
12
+
13
+ def init_image_coor(height, width):
14
+ x_row = np.arange(0, width)
15
+ x = np.tile(x_row, (height, 1))
16
+ x = x[np.newaxis, :, :]
17
+ x = x.astype(np.float32)
18
+ x = torch.from_numpy(x.copy()).cuda()
19
+ u_u0 = x - width/2.0
20
+
21
+ y_col = np.arange(0, height)
22
+ y = np.tile(y_col, (width, 1)).T
23
+ y = y[np.newaxis, :, :]
24
+ y = y.astype(np.float32)
25
+ y = torch.from_numpy(y.copy()).cuda()
26
+ v_v0 = y - height/2.0
27
+ return u_u0, v_v0
28
+
29
+
30
+ def depth_to_xyz(depth, focal_length):
31
+ b, c, h, w = depth.shape
32
+ u_u0, v_v0 = init_image_coor(h, w)
33
+ x = u_u0 * depth / focal_length[0]
34
+ y = v_v0 * depth / focal_length[1]
35
+ z = depth
36
+ pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1) # [b, h, w, c]
37
+ return pw
38
+
39
+
40
+ def get_surface_normal(xyz, patch_size=5):
41
+ # xyz: [1, h, w, 3]
42
+ x, y, z = torch.unbind(xyz, dim=3)
43
+ x = torch.unsqueeze(x, 0)
44
+ y = torch.unsqueeze(y, 0)
45
+ z = torch.unsqueeze(z, 0)
46
+
47
+ xx = x * x
48
+ yy = y * y
49
+ zz = z * z
50
+ xy = x * y
51
+ xz = x * z
52
+ yz = y * z
53
+ patch_weight = torch.ones((1, 1, patch_size, patch_size), requires_grad=False).cuda()
54
+ xx_patch = nn.functional.conv2d(xx, weight=patch_weight, padding=int(patch_size / 2))
55
+ yy_patch = nn.functional.conv2d(yy, weight=patch_weight, padding=int(patch_size / 2))
56
+ zz_patch = nn.functional.conv2d(zz, weight=patch_weight, padding=int(patch_size / 2))
57
+ xy_patch = nn.functional.conv2d(xy, weight=patch_weight, padding=int(patch_size / 2))
58
+ xz_patch = nn.functional.conv2d(xz, weight=patch_weight, padding=int(patch_size / 2))
59
+ yz_patch = nn.functional.conv2d(yz, weight=patch_weight, padding=int(patch_size / 2))
60
+ ATA = torch.stack([xx_patch, xy_patch, xz_patch, xy_patch, yy_patch, yz_patch, xz_patch, yz_patch, zz_patch],
61
+ dim=4)
62
+ ATA = torch.squeeze(ATA)
63
+ ATA = torch.reshape(ATA, (ATA.size(0), ATA.size(1), 3, 3))
64
+ eps_identity = 1e-6 * torch.eye(3, device=ATA.device, dtype=ATA.dtype)[None, None, :, :].repeat([ATA.size(0), ATA.size(1), 1, 1])
65
+ ATA = ATA + eps_identity
66
+ x_patch = nn.functional.conv2d(x, weight=patch_weight, padding=int(patch_size / 2))
67
+ y_patch = nn.functional.conv2d(y, weight=patch_weight, padding=int(patch_size / 2))
68
+ z_patch = nn.functional.conv2d(z, weight=patch_weight, padding=int(patch_size / 2))
69
+ AT1 = torch.stack([x_patch, y_patch, z_patch], dim=4)
70
+ AT1 = torch.squeeze(AT1)
71
+ AT1 = torch.unsqueeze(AT1, 3)
72
+
73
+ patch_num = 4
74
+ patch_x = int(AT1.size(1) / patch_num)
75
+ patch_y = int(AT1.size(0) / patch_num)
76
+ n_img = torch.randn(AT1.shape).cuda()
77
+ overlap = patch_size // 2 + 1
78
+ for x in range(int(patch_num)):
79
+ for y in range(int(patch_num)):
80
+ left_flg = 0 if x == 0 else 1
81
+ right_flg = 0 if x == patch_num -1 else 1
82
+ top_flg = 0 if y == 0 else 1
83
+ btm_flg = 0 if y == patch_num - 1 else 1
84
+ at1 = AT1[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
85
+ x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
86
+ ata = ATA[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
87
+ x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
88
+ # n_img_tmp, _ = torch.solve(at1, ata)
89
+ n_img_tmp = torch.linalg.solve(ata, at1)
90
+
91
+ n_img_tmp_select = n_img_tmp[top_flg * overlap:patch_y + top_flg * overlap, left_flg * overlap:patch_x + left_flg * overlap, :, :]
92
+ n_img[y * patch_y:y * patch_y + patch_y, x * patch_x:x * patch_x + patch_x, :, :] = n_img_tmp_select
93
+
94
+ n_img_L2 = torch.sqrt(torch.sum(n_img ** 2, dim=2, keepdim=True))
95
+ n_img_norm = n_img / n_img_L2
96
+
97
+ # re-orient normals consistently
98
+ orient_mask = torch.sum(torch.squeeze(n_img_norm) * torch.squeeze(xyz), dim=2) > 0
99
+ n_img_norm[orient_mask] *= -1
100
+ return n_img_norm
101
+
102
+ def get_surface_normalv2(xyz, patch_size=5):
103
+ """
104
+ xyz: xyz coordinates
105
+ patch: [p1, p2, p3,
106
+ p4, p5, p6,
107
+ p7, p8, p9]
108
+ surface_normal = [(p9-p1) x (p3-p7)] + [(p6-p4) - (p8-p2)]
109
+ return: normal [h, w, 3, b]
110
+ """
111
+ b, h, w, c = xyz.shape
112
+ half_patch = patch_size // 2
113
+ xyz_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1, c), dtype=xyz.dtype, device=xyz.device)
114
+ xyz_pad[:, half_patch:-half_patch, half_patch:-half_patch, :] = xyz
115
+
116
+ # xyz_left_top = xyz_pad[:, :h, :w, :] # p1
117
+ # xyz_right_bottom = xyz_pad[:, -h:, -w:, :]# p9
118
+ # xyz_left_bottom = xyz_pad[:, -h:, :w, :] # p7
119
+ # xyz_right_top = xyz_pad[:, :h, -w:, :] # p3
120
+ # xyz_cross1 = xyz_left_top - xyz_right_bottom # p1p9
121
+ # xyz_cross2 = xyz_left_bottom - xyz_right_top # p7p3
122
+
123
+ xyz_left = xyz_pad[:, half_patch:half_patch + h, :w, :] # p4
124
+ xyz_right = xyz_pad[:, half_patch:half_patch + h, -w:, :] # p6
125
+ xyz_top = xyz_pad[:, :h, half_patch:half_patch + w, :] # p2
126
+ xyz_bottom = xyz_pad[:, -h:, half_patch:half_patch + w, :] # p8
127
+ xyz_horizon = xyz_left - xyz_right # p4p6
128
+ xyz_vertical = xyz_top - xyz_bottom # p2p8
129
+
130
+ xyz_left_in = xyz_pad[:, half_patch:half_patch + h, 1:w+1, :] # p4
131
+ xyz_right_in = xyz_pad[:, half_patch:half_patch + h, patch_size-1:patch_size-1+w, :] # p6
132
+ xyz_top_in = xyz_pad[:, 1:h+1, half_patch:half_patch + w, :] # p2
133
+ xyz_bottom_in = xyz_pad[:, patch_size-1:patch_size-1+h, half_patch:half_patch + w, :] # p8
134
+ xyz_horizon_in = xyz_left_in - xyz_right_in # p4p6
135
+ xyz_vertical_in = xyz_top_in - xyz_bottom_in # p2p8
136
+
137
+ n_img_1 = torch.cross(xyz_horizon_in, xyz_vertical_in, dim=3)
138
+ n_img_2 = torch.cross(xyz_horizon, xyz_vertical, dim=3)
139
+
140
+ # re-orient normals consistently
141
+ orient_mask = torch.sum(n_img_1 * xyz, dim=3) > 0
142
+ n_img_1[orient_mask] *= -1
143
+ orient_mask = torch.sum(n_img_2 * xyz, dim=3) > 0
144
+ n_img_2[orient_mask] *= -1
145
+
146
+ n_img1_L2 = torch.sqrt(torch.sum(n_img_1 ** 2, dim=3, keepdim=True))
147
+ n_img1_norm = n_img_1 / (n_img1_L2 + 1e-8)
148
+
149
+ n_img2_L2 = torch.sqrt(torch.sum(n_img_2 ** 2, dim=3, keepdim=True))
150
+ n_img2_norm = n_img_2 / (n_img2_L2 + 1e-8)
151
+
152
+ # average 2 norms
153
+ n_img_aver = n_img1_norm + n_img2_norm
154
+ n_img_aver_L2 = torch.sqrt(torch.sum(n_img_aver ** 2, dim=3, keepdim=True))
155
+ n_img_aver_norm = n_img_aver / (n_img_aver_L2 + 1e-8)
156
+ # re-orient normals consistently
157
+ orient_mask = torch.sum(n_img_aver_norm * xyz, dim=3) > 0
158
+ n_img_aver_norm[orient_mask] *= -1
159
+ n_img_aver_norm_out = n_img_aver_norm.permute((1, 2, 3, 0)) # [h, w, c, b]
160
+
161
+ # a = torch.sum(n_img1_norm_out*n_img2_norm_out, dim=2).cpu().numpy().squeeze()
162
+ # plt.imshow(np.abs(a), cmap='rainbow')
163
+ # plt.show()
164
+ return n_img_aver_norm_out#n_img1_norm.permute((1, 2, 3, 0))
165
+
166
+ def surface_normal_from_depth(depth, focal_length, valid_mask=None):
167
+ # para depth: depth map, [b, c, h, w]
168
+ b, c, h, w = depth.shape
169
+ focal_length = focal_length[:, None, None, None]
170
+ depth_filter = nn.functional.avg_pool2d(depth, kernel_size=3, stride=1, padding=1)
171
+ #depth_filter = nn.functional.avg_pool2d(depth_filter, kernel_size=3, stride=1, padding=1)
172
+ xyz = depth_to_xyz(depth_filter, focal_length)
173
+ sn_batch = []
174
+ for i in range(b):
175
+ xyz_i = xyz[i, :][None, :, :, :]
176
+ #normal = get_surface_normalv2(xyz_i)
177
+ normal = get_surface_normal(xyz_i)
178
+ sn_batch.append(normal)
179
+ sn_batch = torch.cat(sn_batch, dim=3).permute((3, 2, 0, 1)) # [b, c, h, w]
180
+
181
+ if valid_mask is not None:
182
+ mask_invalid = (~valid_mask).repeat(1, 3, 1, 1)
183
+ sn_batch[mask_invalid] = 0.0
184
+
185
+ return sn_batch
186
+
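A sketch of the cross-product variant on a synthetic tilted plane, which runs on CPU (the least-squares variant above allocates CUDA tensors internally):

import torch
from geowizard.utils.depth2normal import get_surface_normalv2  # assumed import path

h, w = 64, 64
ys, xs = torch.meshgrid(torch.arange(h), torch.arange(w), indexing="ij")
z = 1.0 + 0.01 * xs.float()                                    # plane tilted along x
xyz = torch.stack([xs.float(), ys.float(), z], dim=-1)[None]   # [1, h, w, 3]
normals = get_surface_normalv2(xyz)                            # [h, w, 3, 1], unit length, near-constant on the plane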
geowizard/utils/depth_ensemble.py ADDED
@@ -0,0 +1,115 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import numpy as np
4
+ import torch
5
+
6
+ from scipy.optimize import minimize
7
+
8
+ def inter_distances(tensors: torch.Tensor):
9
+ """
10
+ Compute the pairwise differences between each pair of depth maps.
11
+ """
12
+ distances = []
13
+ for i, j in torch.combinations(torch.arange(tensors.shape[0])):
14
+ arr1 = tensors[i : i + 1]
15
+ arr2 = tensors[j : j + 1]
16
+ distances.append(arr1 - arr2)
17
+ dist = torch.concat(distances, dim=0)
18
+ return dist
19
+
20
+
21
+ def ensemble_depths(input_images:torch.Tensor,
22
+ regularizer_strength: float =0.02,
23
+ max_iter: int =2,
24
+ tol:float =1e-3,
25
+ reduction: str='median',
26
+ max_res: int=None):
27
+ """
28
+ Ensemble multiple affine-invariant depth images (each defined only up to scale and shift)
29
+ by jointly estimating per-image scales and shifts and aligning the predictions.
30
+ """
31
+
32
+ device = input_images.device
33
+ dtype = input_images.dtype
34
+ np_dtype = np.float32
35
+
36
+
37
+ original_input = input_images.clone()
38
+ n_img = input_images.shape[0]
39
+ ori_shape = input_images.shape
40
+
41
+ if max_res is not None:
42
+ scale_factor = torch.min(max_res / torch.tensor(ori_shape[-2:]))
43
+ if scale_factor < 1:
44
+ downscaler = torch.nn.Upsample(scale_factor=scale_factor, mode="nearest")
45
+ input_images = downscaler(input_images) # input is already a torch.Tensor; torch.from_numpy would fail here
46
+
47
+ # init guess
48
+ _min = np.min(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1) # get the min value of each possible depth
49
+ _max = np.max(input_images.reshape((n_img, -1)).cpu().numpy(), axis=1) # get the max value of each possible depth
50
+ s_init = 1.0 / (_max - _min).reshape((-1, 1, 1)) # (n_img, 1, 1): initial per-image scale
51
+ t_init = (-1 * s_init.flatten() * _min.flatten()).reshape((-1, 1, 1)) # (n_img, 1, 1): initial per-image shift
52
+
53
+ x = np.concatenate([s_init, t_init]).reshape(-1).astype(np_dtype) # (2 * n_img,) packed [scales, shifts]
54
+
55
+ input_images = input_images.to(device)
56
+
57
+ # objective function
58
+ def closure(x):
59
+ l = len(x)
60
+ s = x[: int(l / 2)]
61
+ t = x[int(l / 2) :]
62
+ s = torch.from_numpy(s).to(dtype=dtype).to(device)
63
+ t = torch.from_numpy(t).to(dtype=dtype).to(device)
64
+
65
+ transformed_arrays = input_images * s.view((-1, 1, 1)) + t.view((-1, 1, 1))
66
+ dists = inter_distances(transformed_arrays)
67
+ sqrt_dist = torch.sqrt(torch.mean(dists**2))
68
+
69
+ if "mean" == reduction:
70
+ pred = torch.mean(transformed_arrays, dim=0)
71
+ elif "median" == reduction:
72
+ pred = torch.median(transformed_arrays, dim=0).values
73
+ else:
74
+ raise ValueError
75
+
76
+ near_err = torch.sqrt((0 - torch.min(pred)) ** 2)
77
+ far_err = torch.sqrt((1 - torch.max(pred)) ** 2)
78
+
79
+ err = sqrt_dist + (near_err + far_err) * regularizer_strength
80
+ err = err.detach().cpu().numpy().astype(np_dtype)
81
+ return err
82
+
83
+ res = minimize(
84
+ closure, x, method="BFGS", tol=tol, options={"maxiter": max_iter, "disp": False}
85
+ )
86
+ x = res.x
87
+ l = len(x)
88
+ s = x[: int(l / 2)]
89
+ t = x[int(l / 2) :]
90
+
91
+ # Prediction
92
+ s = torch.from_numpy(s).to(dtype=dtype).to(device)
93
+ t = torch.from_numpy(t).to(dtype=dtype).to(device)
94
+ transformed_arrays = original_input * s.view(-1, 1, 1) + t.view(-1, 1, 1) #[10,H,W]
95
+
96
+
97
+ if "mean" == reduction:
98
+ aligned_images = torch.mean(transformed_arrays, dim=0)
99
+ std = torch.std(transformed_arrays, dim=0)
100
+ uncertainty = std
101
+
102
+ elif "median" == reduction:
103
+ aligned_images = torch.median(transformed_arrays, dim=0).values
104
+ # MAD (median absolute deviation) as uncertainty indicator
105
+ abs_dev = torch.abs(transformed_arrays - aligned_images)
106
+ mad = torch.median(abs_dev, dim=0).values
107
+ uncertainty = mad
108
+
109
+ # Scale and shift to [0, 1]
110
+ _min = torch.min(aligned_images)
111
+ _max = torch.max(aligned_images)
112
+ aligned_images = (aligned_images - _min) / (_max - _min)
113
+ uncertainty /= _max - _min
114
+
115
+ return aligned_images, uncertainty
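The objective aligns per-image scales and shifts with BFGS before reducing; a sketch on synthetic affine copies of one depth map:

import torch
from geowizard.utils.depth_ensemble import ensemble_depths  # assumed import path

base = torch.rand(120, 160)
scales, shifts = torch.rand(10) + 0.5, torch.rand(10)
preds = torch.stack([base * s + t for s, t in zip(scales, shifts)])  # [10, H, W]
depth, uncertainty = ensemble_depths(preds, reduction="median")
print(depth.shape, uncertainty.shape)  # both [120, 160]; depth rescaled to [0, 1]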
geowizard/utils/image_util.py ADDED
@@ -0,0 +1,83 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import matplotlib
4
+ import numpy as np
5
+ import torch
6
+ from PIL import Image
7
+
8
+
9
+
10
+
11
+ def resize_max_res(img: Image.Image, max_edge_resolution: int) -> Image.Image:
12
+ """
13
+ Resize image to limit maximum edge length while keeping aspect ratio.
14
+ Args:
15
+ img (`Image.Image`):
16
+ Image to be resized.
17
+ max_edge_resolution (`int`):
18
+ Maximum edge length (pixel).
19
+ Returns:
20
+ `Image.Image`: Resized image.
21
+ """
22
+
23
+ original_width, original_height = img.size
24
+
25
+ downscale_factor = min(
26
+ max_edge_resolution / original_width, max_edge_resolution / original_height
27
+ )
28
+
29
+ new_width = int(original_width * downscale_factor)
30
+ new_height = int(original_height * downscale_factor)
31
+
32
+ resized_img = img.resize((new_width, new_height))
33
+ return resized_img
34
+
35
+
36
+ def colorize_depth_maps(
37
+ depth_map, min_depth, max_depth, cmap="Spectral", valid_mask=None
38
+ ):
39
+ """
40
+ Colorize depth maps.
41
+ """
42
+ assert len(depth_map.shape) >= 2, "Invalid dimension"
43
+
44
+ if isinstance(depth_map, torch.Tensor):
45
+ depth = depth_map.detach().clone().squeeze().numpy()
46
+ elif isinstance(depth_map, np.ndarray):
47
+ depth = depth_map.copy().squeeze()
48
+ # reshape to [ (B,) H, W ]
49
+ if depth.ndim < 3:
50
+ depth = depth[np.newaxis, :, :]
51
+
52
+ # colorize
53
+ cm = matplotlib.colormaps[cmap]
54
+ depth = ((depth - min_depth) / (max_depth - min_depth)).clip(0, 1)
55
+ img_colored_np = cm(depth, bytes=False)[:, :, :, 0:3] # value from 0 to 1
56
+ img_colored_np = np.rollaxis(img_colored_np, 3, 1)
57
+
58
+ if valid_mask is not None:
59
+ if isinstance(depth_map, torch.Tensor):
60
+ valid_mask = valid_mask.detach().numpy()
61
+ valid_mask = valid_mask.squeeze() # [H, W] or [B, H, W]
62
+ if valid_mask.ndim < 3:
63
+ valid_mask = valid_mask[np.newaxis, np.newaxis, :, :]
64
+ else:
65
+ valid_mask = valid_mask[:, np.newaxis, :, :]
66
+ valid_mask = np.repeat(valid_mask, 3, axis=1)
67
+ img_colored_np[~valid_mask] = 0
68
+
69
+ if isinstance(depth_map, torch.Tensor):
70
+ img_colored = torch.from_numpy(img_colored_np).float()
71
+ elif isinstance(depth_map, np.ndarray):
72
+ img_colored = img_colored_np
73
+
74
+ return img_colored
75
+
76
+
77
+ def chw2hwc(chw):
78
+ assert 3 == len(chw.shape)
79
+ if isinstance(chw, torch.Tensor):
80
+ hwc = torch.permute(chw, (1, 2, 0))
81
+ elif isinstance(chw, np.ndarray):
82
+ hwc = np.moveaxis(chw, 0, -1)
83
+ return hwc
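A sketch of colorizing a depth map and converting it back to HWC for saving:

import numpy as np
from geowizard.utils.image_util import colorize_depth_maps, chw2hwc  # assumed import path

depth = np.random.rand(240, 320).astype(np.float32)
colored = colorize_depth_maps(depth, min_depth=0.0, max_depth=1.0, cmap="Spectral")  # [1, 3, H, W] in [0, 1]
img = (chw2hwc(colored[0]) * 255).astype(np.uint8)  # [H, W, 3], ready for PIL or OpenCV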
geowizard/utils/normal_ensemble.py ADDED
@@ -0,0 +1,22 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import numpy as np
4
+ import torch
5
+
6
+ def ensemble_normals(input_images:torch.Tensor):
7
+ normal_preds = input_images
8
+
9
+ bsz, d, h, w = normal_preds.shape
10
+ normal_preds = normal_preds / (torch.norm(normal_preds, p=2, dim=1).unsqueeze(1)+1e-5)
11
+
12
+ phi = torch.atan2(normal_preds[:,1,:,:], normal_preds[:,0,:,:]).mean(dim=0)
13
+ theta = torch.atan2(torch.norm(normal_preds[:,:2,:,:], p=2, dim=1), normal_preds[:,2,:,:]).mean(dim=0)
14
+ normal_pred = torch.zeros((d,h,w)).to(normal_preds)
15
+ normal_pred[0,:,:] = torch.sin(theta) * torch.cos(phi)
16
+ normal_pred[1,:,:] = torch.sin(theta) * torch.sin(phi)
17
+ normal_pred[2,:,:] = torch.cos(theta)
18
+
19
+ angle_error = torch.acos(torch.cosine_similarity(normal_pred[None], normal_preds, dim=1))
20
+ normal_idx = torch.argmin(angle_error.reshape(bsz,-1).sum(-1))
21
+
22
+ return normal_preds[normal_idx]
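`ensemble_normals` averages the candidates in spherical coordinates, then returns the member closest in angle to that mean; a sketch:

import torch
from geowizard.utils.normal_ensemble import ensemble_normals  # assumed import path

preds = torch.randn(5, 3, 64, 64)  # five candidate normal maps
best = ensemble_normals(preds)     # [3, 64, 64]: the (normalized) member nearest the angular mean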
geowizard/utils/seed_all.py ADDED
@@ -0,0 +1,33 @@
1
+ # Copyright 2023 Bingxin Ke, ETH Zurich. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ # --------------------------------------------------------------------------
15
+ # If you find this code useful, we kindly ask you to cite our paper in your work.
16
+ # Please find bibtex at: https://github.com/prs-eth/Marigold#-citation
17
+ # More information about the method can be found at https://marigoldmonodepth.github.io
18
+ # --------------------------------------------------------------------------
19
+
20
+
21
+ import numpy as np
22
+ import random
23
+ import torch
24
+
25
+
26
+ def seed_all(seed: int = 0):
27
+ """
28
+ Set random seeds of all components.
29
+ """
30
+ random.seed(seed)
31
+ np.random.seed(seed)
32
+ torch.manual_seed(seed)
33
+ torch.cuda.manual_seed_all(seed)
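Typically called once at startup, as in the inference script above:

from geowizard.utils.seed_all import seed_all  # assumed import path

seed_all(2024)  # seeds Python's random, NumPy, and all torch/CUDA generators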
geowizard/utils/surface_normal.py ADDED
@@ -0,0 +1,213 @@
1
+ # A reimplemented version in public environments by Xiao Fu and Mu Hu
2
+
3
+ import torch
4
+ import numpy as np
5
+ import torch.nn as nn
6
+
7
+
8
+ def init_image_coor(height, width):
9
+ x_row = np.arange(0, width)
10
+ x = np.tile(x_row, (height, 1))
11
+ x = x[np.newaxis, :, :]
12
+ x = x.astype(np.float32)
13
+ x = torch.from_numpy(x.copy()).cuda()
14
+ u_u0 = x - width/2.0
15
+
16
+ y_col = np.arange(0, height)
17
+ y = np.tile(y_col, (width, 1)).T
18
+ y = y[np.newaxis, :, :]
19
+ y = y.astype(np.float32)
20
+ y = torch.from_numpy(y.copy()).cuda()
21
+ v_v0 = y - height/2.0
22
+ return u_u0, v_v0
23
+
24
+
25
+ def depth_to_xyz(depth, focal_length):
26
+ b, c, h, w = depth.shape
27
+ u_u0, v_v0 = init_image_coor(h, w)
28
+ x = u_u0 * depth / focal_length
29
+ y = v_v0 * depth / focal_length
30
+ z = depth
31
+ pw = torch.cat([x, y, z], 1).permute(0, 2, 3, 1) # [b, h, w, c]
32
+ return pw
33
+
34
+
35
+ def get_surface_normal(xyz, patch_size=3):
36
+ # xyz: [1, h, w, 3]
37
+ x, y, z = torch.unbind(xyz, dim=3)
38
+ x = torch.unsqueeze(x, 0)
39
+ y = torch.unsqueeze(y, 0)
40
+ z = torch.unsqueeze(z, 0)
41
+
42
+ xx = x * x
43
+ yy = y * y
44
+ zz = z * z
45
+ xy = x * y
46
+ xz = x * z
47
+ yz = y * z
48
+ patch_weight = torch.ones((1, 1, patch_size, patch_size), requires_grad=False).cuda()
49
+ xx_patch = nn.functional.conv2d(xx, weight=patch_weight, padding=int(patch_size / 2))
50
+ yy_patch = nn.functional.conv2d(yy, weight=patch_weight, padding=int(patch_size / 2))
51
+ zz_patch = nn.functional.conv2d(zz, weight=patch_weight, padding=int(patch_size / 2))
52
+ xy_patch = nn.functional.conv2d(xy, weight=patch_weight, padding=int(patch_size / 2))
53
+ xz_patch = nn.functional.conv2d(xz, weight=patch_weight, padding=int(patch_size / 2))
54
+ yz_patch = nn.functional.conv2d(yz, weight=patch_weight, padding=int(patch_size / 2))
55
+ ATA = torch.stack([xx_patch, xy_patch, xz_patch, xy_patch, yy_patch, yz_patch, xz_patch, yz_patch, zz_patch],
56
+ dim=4)
57
+ ATA = torch.squeeze(ATA)
58
+ ATA = torch.reshape(ATA, (ATA.size(0), ATA.size(1), 3, 3))
59
+ eps_identity = 1e-6 * torch.eye(3, device=ATA.device, dtype=ATA.dtype)[None, None, :, :].repeat([ATA.size(0), ATA.size(1), 1, 1])
60
+ ATA = ATA + eps_identity
61
+ x_patch = nn.functional.conv2d(x, weight=patch_weight, padding=int(patch_size / 2))
62
+ y_patch = nn.functional.conv2d(y, weight=patch_weight, padding=int(patch_size / 2))
63
+ z_patch = nn.functional.conv2d(z, weight=patch_weight, padding=int(patch_size / 2))
64
+ AT1 = torch.stack([x_patch, y_patch, z_patch], dim=4)
65
+ AT1 = torch.squeeze(AT1)
66
+ AT1 = torch.unsqueeze(AT1, 3)
67
+
68
+ patch_num = 4
69
+ patch_x = int(AT1.size(1) / patch_num)
70
+ patch_y = int(AT1.size(0) / patch_num)
71
+ n_img = torch.randn(AT1.shape).cuda()
72
+ overlap = patch_size // 2 + 1
73
+ for x in range(int(patch_num)):
74
+ for y in range(int(patch_num)):
75
+ left_flg = 0 if x == 0 else 1
76
+ right_flg = 0 if x == patch_num -1 else 1
77
+ top_flg = 0 if y == 0 else 1
78
+ btm_flg = 0 if y == patch_num - 1 else 1
79
+ at1 = AT1[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
80
+ x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
81
+ ata = ATA[y * patch_y - top_flg * overlap:(y + 1) * patch_y + btm_flg * overlap,
82
+ x * patch_x - left_flg * overlap:(x + 1) * patch_x + right_flg * overlap]
83
+ n_img_tmp = torch.linalg.solve(ata, at1) # torch.solve was removed in recent PyTorch; linalg.solve takes (A, B)
84
+
85
+ n_img_tmp_select = n_img_tmp[top_flg * overlap:patch_y + top_flg * overlap, left_flg * overlap:patch_x + left_flg * overlap, :, :]
86
+ n_img[y * patch_y:y * patch_y + patch_y, x * patch_x:x * patch_x + patch_x, :, :] = n_img_tmp_select
87
+
88
+ n_img_L2 = torch.sqrt(torch.sum(n_img ** 2, dim=2, keepdim=True))
89
+ n_img_norm = n_img / n_img_L2
90
+
91
+ # re-orient normals consistently
92
+ orient_mask = torch.sum(torch.squeeze(n_img_norm) * torch.squeeze(xyz), dim=2) > 0
93
+ n_img_norm[orient_mask] *= -1
94
+ return n_img_norm
95
+
96
+ def get_surface_normalv2(xyz, patch_size=3):
97
+ """
98
+ xyz: xyz coordinates
99
+ patch: [p1, p2, p3,
100
+ p4, p5, p6,
101
+ p7, p8, p9]
102
+ surface_normal = [(p9-p1) x (p3-p7)] + [(p6-p4) - (p8-p2)]
103
+ return: normal [h, w, 3, b]
104
+ """
105
+ b, h, w, c = xyz.shape
106
+ half_patch = patch_size // 2
107
+ xyz_pad = torch.zeros((b, h + patch_size - 1, w + patch_size - 1, c), dtype=xyz.dtype, device=xyz.device)
108
+ xyz_pad[:, half_patch:-half_patch, half_patch:-half_patch, :] = xyz
109
+
110
+ # xyz_left_top = xyz_pad[:, :h, :w, :] # p1
111
+ # xyz_right_bottom = xyz_pad[:, -h:, -w:, :]# p9
112
+ # xyz_left_bottom = xyz_pad[:, -h:, :w, :] # p7
113
+ # xyz_right_top = xyz_pad[:, :h, -w:, :] # p3
114
+ # xyz_cross1 = xyz_left_top - xyz_right_bottom # p1p9
115
+ # xyz_cross2 = xyz_left_bottom - xyz_right_top # p7p3
116
+
117
+ xyz_left = xyz_pad[:, half_patch:half_patch + h, :w, :] # p4
118
+ xyz_right = xyz_pad[:, half_patch:half_patch + h, -w:, :] # p6
119
+ xyz_top = xyz_pad[:, :h, half_patch:half_patch + w, :] # p2
120
+ xyz_bottom = xyz_pad[:, -h:, half_patch:half_patch + w, :] # p8
121
+ xyz_horizon = xyz_left - xyz_right # p4p6
122
+ xyz_vertical = xyz_top - xyz_bottom # p2p8
123
+
124
+ xyz_left_in = xyz_pad[:, half_patch:half_patch + h, 1:w+1, :] # p4
125
+ xyz_right_in = xyz_pad[:, half_patch:half_patch + h, patch_size-1:patch_size-1+w, :] # p6
126
+ xyz_top_in = xyz_pad[:, 1:h+1, half_patch:half_patch + w, :] # p2
127
+ xyz_bottom_in = xyz_pad[:, patch_size-1:patch_size-1+h, half_patch:half_patch + w, :] # p8
128
+ xyz_horizon_in = xyz_left_in - xyz_right_in # p4p6
129
+ xyz_vertical_in = xyz_top_in - xyz_bottom_in # p2p8
130
+
131
+ n_img_1 = torch.cross(xyz_horizon_in, xyz_vertical_in, dim=3)
132
+ n_img_2 = torch.cross(xyz_horizon, xyz_vertical, dim=3)
133
+
134
+ # re-orient normals consistently
135
+ orient_mask = torch.sum(n_img_1 * xyz, dim=3) > 0
136
+ n_img_1[orient_mask] *= -1
137
+ orient_mask = torch.sum(n_img_2 * xyz, dim=3) > 0
138
+ n_img_2[orient_mask] *= -1
139
+
140
+ n_img1_L2 = torch.sqrt(torch.sum(n_img_1 ** 2, dim=3, keepdim=True))
141
+ n_img1_norm = n_img_1 / (n_img1_L2 + 1e-8)
142
+
143
+ n_img2_L2 = torch.sqrt(torch.sum(n_img_2 ** 2, dim=3, keepdim=True))
144
+ n_img2_norm = n_img_2 / (n_img2_L2 + 1e-8)
145
+
146
+ # average 2 norms
147
+ n_img_aver = n_img1_norm + n_img2_norm
148
+ n_img_aver_L2 = torch.sqrt(torch.sum(n_img_aver ** 2, dim=3, keepdim=True))
149
+ n_img_aver_norm = n_img_aver / (n_img_aver_L2 + 1e-8)
150
+ # re-orient normals consistently
151
+ orient_mask = torch.sum(n_img_aver_norm * xyz, dim=3) > 0
152
+ n_img_aver_norm[orient_mask] *= -1
153
+ n_img_aver_norm_out = n_img_aver_norm.permute((1, 2, 3, 0)) # [h, w, c, b]
154
+
155
+ # a = torch.sum(n_img1_norm_out*n_img2_norm_out, dim=2).cpu().numpy().squeeze()
156
+ # plt.imshow(np.abs(a), cmap='rainbow')
157
+ # plt.show()
158
+ return n_img_aver_norm_out#n_img1_norm.permute((1, 2, 3, 0))
159
+
160
+ def surface_normal_from_depth(depth, focal_length, valid_mask=None):
161
+ # para depth: depth map, [b, c, h, w]
162
+ b, c, h, w = depth.shape
163
+ focal_length = focal_length[:, None, None, None]
164
+ depth_filter = nn.functional.avg_pool2d(depth, kernel_size=3, stride=1, padding=1)
165
+ depth_filter = nn.functional.avg_pool2d(depth_filter, kernel_size=3, stride=1, padding=1)
166
+ xyz = depth_to_xyz(depth_filter, focal_length)
167
+ sn_batch = []
168
+ for i in range(b):
169
+ xyz_i = xyz[i, :][None, :, :, :]
170
+ normal = get_surface_normalv2(xyz_i)
171
+ sn_batch.append(normal)
172
+ sn_batch = torch.cat(sn_batch, dim=3).permute((3, 2, 0, 1)) # [b, c, h, w]
173
+ if valid_mask is not None:
174
+ mask_invalid = (~valid_mask).repeat(1, 3, 1, 1)
+ sn_batch[mask_invalid] = 0.0
175
+
176
+ return sn_batch
177
+
178
+
179
+ def vis_normal(normal):
180
+ """
181
+ Visualize surface normal. Transfer surface normal value from [-1, 1] to [0, 255]
182
+ @para normal: surface normal, [h, w, 3], numpy.array
183
+ """
184
+ n_img_L2 = np.sqrt(np.sum(normal ** 2, axis=2, keepdims=True))
185
+ n_img_norm = normal / (n_img_L2 + 1e-8)
186
+ normal_vis = n_img_norm * 127
187
+ normal_vis += 128
188
+ normal_vis = normal_vis.astype(np.uint8)
189
+ return normal_vis
190
+
191
+ def vis_normal2(normals):
192
+ '''
193
+ Montage of normal maps. Vectors are unit length and backfaces thresholded.
194
+ '''
195
+ x = normals[:, :, 0] # horizontal; pos right
196
+ y = normals[:, :, 1] # depth; pos far
197
+ z = normals[:, :, 2] # vertical; pos up
198
+ backfacing = (z > 0)
199
+ norm = np.sqrt(np.sum(normals**2, axis=2))
200
+ zero = (norm < 1e-5)
201
+ x += 1.0; x *= 0.5
202
+ y += 1.0; y *= 0.5
203
+ z = np.abs(z)
204
+ x[zero] = 0.0
205
+ y[zero] = 0.0
206
+ z[zero] = 0.0
207
+ normals[:, :, 0] = x # horizontal; pos right
208
+ normals[:, :, 1] = y # depth; pos far
209
+ normals[:, :, 2] = z # vertical; pos up
210
+ return normals
211
+
212
+ if __name__ == '__main__':
213
+ import cv2, os
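A sketch of the visualization helper, which renormalizes and maps unit normals from [-1, 1] into a displayable [0, 255] image:

import numpy as np
from geowizard.utils.surface_normal import vis_normal  # assumed import path

normals = np.random.randn(240, 320, 3).astype(np.float32)
vis = vis_normal(normals)  # uint8 [240, 320, 3]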
requirements.txt ADDED
@@ -0,0 +1,133 @@
1
+ accelerate==0.30.1
2
+ aiofiles==23.2.1
3
+ aiohttp==3.9.5
4
+ aiosignal==1.3.1
5
+ altair==5.3.0
6
+ annotated-types==0.7.0
7
+ anyio==4.4.0
8
+ async-timeout==4.0.3
9
+ attrs==23.2.0
10
+ Authlib==1.3.0
11
+ certifi==2024.2.2
12
+ cffi==1.16.0
13
+ charset-normalizer==3.3.2
14
+ click==8.0.4
15
+ contourpy==1.2.1
16
+ cryptography==42.0.7
17
+ cycler==0.12.1
18
+ dataclasses-json==0.6.6
19
+ datasets==2.19.1
20
+ Deprecated==1.2.14
21
+ diffusers==0.28.0
22
+ dill==0.3.8
23
+ dnspython==2.6.1
24
+ email_validator==2.1.1
25
+ exceptiongroup==1.2.1
26
+ fastapi==0.111.0
27
+ fastapi-cli==0.0.4
28
+ ffmpy==0.3.2
29
+ filelock==3.14.0
30
+ fonttools==4.53.0
31
+ frozenlist==1.4.1
32
+ fsspec==2024.3.1
33
+ gradio==4.32.2
34
+ gradio_client==0.17.0
35
+ gradio_imageslider==0.0.20
36
+ h11==0.14.0
37
+ httpcore==1.0.5
38
+ httptools==0.6.1
39
+ httpx==0.27.0
40
+ huggingface-hub==0.23.0
41
+ idna==3.7
42
+ imageio==2.34.1
43
+ imageio-ffmpeg==0.5.0
44
+ importlib_metadata==7.1.0
45
+ importlib_resources==6.4.0
46
+ itsdangerous==2.2.0
47
+ Jinja2==3.1.4
48
+ jsonschema==4.22.0
49
+ jsonschema-specifications==2023.12.1
50
+ kiwisolver==1.4.5
51
+ markdown-it-py==3.0.0
52
+ MarkupSafe==2.1.5
53
+ marshmallow==3.21.2
54
+ matplotlib==3.8.2
55
+ mdurl==0.1.2
56
+ mpmath==1.3.0
57
+ multidict==6.0.5
58
+ multiprocess==0.70.16
59
+ mypy-extensions==1.0.0
60
+ networkx==3.3
61
+ numpy==1.26.4
62
+ nvidia-cublas-cu12==12.1.3.1
63
+ nvidia-cuda-cupti-cu12==12.1.105
64
+ nvidia-cuda-nvrtc-cu12==12.1.105
65
+ nvidia-cuda-runtime-cu12==12.1.105
66
+ nvidia-cudnn-cu12==8.9.2.26
67
+ nvidia-cufft-cu12==11.0.2.54
68
+ nvidia-curand-cu12==10.3.2.106
69
+ nvidia-cusolver-cu12==11.4.5.107
70
+ nvidia-cusparse-cu12==12.1.0.106
71
+ nvidia-nccl-cu12==2.19.3
72
+ nvidia-nvjitlink-cu12==12.5.40
73
+ nvidia-nvtx-cu12==12.1.105
74
+ orjson==3.10.3
75
+ packaging==24.0
76
+ pandas==2.2.2
77
+ pillow==10.3.0
78
+ protobuf==3.20.3
79
+ psutil==5.9.8
80
+ pyarrow==16.0.0
81
+ pyarrow-hotfix==0.6
82
+ pycparser==2.22
83
+ pydantic==2.7.2
84
+ pydantic_core==2.18.3
85
+ pydub==0.25.1
86
+ pygltflib==1.16.1
87
+ Pygments==2.18.0
88
+ pyparsing==3.1.2
89
+ python-dateutil==2.9.0.post0
90
+ python-dotenv==1.0.1
91
+ python-multipart==0.0.9
92
+ pytz==2024.1
93
+ PyYAML==6.0.1
94
+ referencing==0.35.1
95
+ regex==2024.5.15
96
+ requests==2.31.0
97
+ rich==13.7.1
98
+ rpds-py==0.18.1
99
+ ruff==0.4.7
100
+ safetensors==0.4.3
101
+ scipy==1.11.4
102
+ semantic-version==2.10.0
103
+ shellingham==1.5.4
104
+ six==1.16.0
105
+ sniffio==1.3.1
106
+ spaces==0.28.3
107
+ starlette==0.37.2
108
+ sympy==1.12.1
109
+ tokenizers==0.15.2
110
+ tomlkit==0.12.0
111
+ toolz==0.12.1
112
+ torch==2.2.0
113
+ tqdm==4.66.4
114
+ transformers==4.36.1
115
+ trimesh==4.0.5
116
+ triton==2.2.0
117
+ typer==0.12.3
118
+ typing-inspect==0.9.0
119
+ typing_extensions==4.11.0
120
+ tzdata==2024.1
121
+ ujson==5.10.0
122
+ urllib3==2.2.1
123
+ uvicorn==0.30.0
124
+ uvloop==0.19.0
125
+ watchfiles==0.22.0
126
+ websockets==11.0.3
127
+ wrapt==1.16.0
128
+ xformers==0.0.24
129
+ xxhash==3.4.1
130
+ yarl==1.9.4
131
+ zipp==3.19.1
132
+ einops==0.7.0
133
+ opencv-python-headless==4.8.1
requirements_min.txt ADDED
@@ -0,0 +1,18 @@
1
+ gradio>=4.32.1
2
+ gradio-imageslider>=0.0.20
3
+ pygltflib==1.16.1
4
+ trimesh==4.0.5
5
+ imageio
6
+ imageio-ffmpeg
7
+ Pillow
8
+ einops==0.7.0
9
+
10
+ spaces
11
+ accelerate
12
+ diffusers>=0.28.0
13
+ matplotlib==3.8.2
14
+ scipy==1.11.4
15
+ torch==2.0.1
16
+ transformers==4.36.1
17
+ xformers==0.0.21
18
+ opencv-python-headless==4.8.1
stablenormal/__init__.py ADDED
File without changes
stablenormal/pipeline_stablenormal.py ADDED
@@ -0,0 +1,1279 @@
1
+ # Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved.
2
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # --------------------------------------------------------------------------
16
+ # More information and citation instructions are available on the
17
+ # --------------------------------------------------------------------------
18
+ from dataclasses import dataclass
19
+ from typing import Any, Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ from PIL import Image
24
+ from tqdm.auto import tqdm
25
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
26
+
27
+
28
+ from diffusers.image_processor import PipelineImageInput
29
+ from diffusers.models import (
30
+ AutoencoderKL,
31
+ UNet2DConditionModel,
32
+ ControlNetModel,
33
+ )
34
+ from diffusers.schedulers import (
35
+ DDIMScheduler
36
+ )
37
+
38
+ from diffusers.utils import (
39
+ BaseOutput,
40
+ logging,
41
+ replace_example_docstring,
42
+ )
43
+
44
+ from diffusers.models.unets.unet_2d_condition import UNet2DConditionOutput
45
+
46
+ from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
47
+
48
+
49
+
50
+ from diffusers.utils.torch_utils import randn_tensor
51
+ from diffusers.pipelines.controlnet import StableDiffusionControlNetPipeline
52
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
53
+ from diffusers.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor
54
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
55
+ import torch.nn.functional as F
56
+
57
+
58
+
59
+
60
+
61
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
62
+
63
+
64
+ EXAMPLE_DOC_STRING = """
65
+ Examples:
66
+ ```py
67
+ >>> import diffusers
68
+ >>> import torch
69
+
70
+ >>> pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
71
+ ... "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
72
+ ... ).to("cuda")
73
+
74
+ >>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
75
+ >>> normals = pipe(image)
76
+
77
+ >>> vis = pipe.image_processor.visualize_normals(normals.prediction)
78
+ >>> vis[0].save("einstein_normals.png")
79
+ ```
80
+ """
81
+
82
+
83
+ @dataclass
84
+ class StableNormalOutput(BaseOutput):
85
+ """
86
+ Output class for the StableNormal monocular normals prediction pipeline.
87
+
88
+ Args:
89
+ prediction (`np.ndarray`, `torch.Tensor`):
90
+ Predicted normals with values in the range [-1, 1]. The shape is always $numimages \times 3 \times height
91
+ \times width$, regardless of whether the images were passed as a 4D array or a list.
92
+ gaus_noise (`None`, `torch.Tensor`):
93
+ Gaussian noise used to initialize the denoising process, returned so that a prediction can be
94
+ reproduced.
95
+ latent (`None`, `torch.Tensor`):
96
+ Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline.
97
+ The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$.
98
+ """
99
+
100
+ prediction: Union[np.ndarray, torch.Tensor]
101
+ latent: Union[None, torch.Tensor]
102
+ gaus_noise: Union[None, torch.Tensor]
103
+
104
+ from einops import rearrange
105
+ class DINOv2_Encoder(torch.nn.Module):
106
+ IMAGENET_DEFAULT_MEAN = [0.485, 0.456, 0.406]
107
+ IMAGENET_DEFAULT_STD = [0.229, 0.224, 0.225]
108
+
109
+ def __init__(
110
+ self,
111
+ model_name = 'dinov2_vitl14',
112
+ freeze = True,
113
+ antialias=True,
114
+ device="cuda",
115
+ size = 448,
116
+ ):
117
+ super(DINOv2_Encoder, self).__init__()
118
+
119
+ self.model = torch.hub.load('facebookresearch/dinov2', model_name)
120
+ self.model.eval().to(device)
121
+ self.device = device
122
+ self.antialias = antialias
123
+ self.dtype = torch.float32
124
+
125
+ self.mean = torch.Tensor(self.IMAGENET_DEFAULT_MEAN)
126
+ self.std = torch.Tensor(self.IMAGENET_DEFAULT_STD)
127
+ self.size = size
128
+ if freeze:
129
+ self.freeze()
130
+
131
+
132
+ def freeze(self):
133
+ for param in self.model.parameters():
134
+ param.requires_grad = False
135
+
136
+ @torch.no_grad()
137
+ def encoder(self, x):
138
+ '''
139
+ x: [b c h w], range (-1, 1), RGB
140
+ '''
141
+
142
+ x = self.preprocess(x).to(self.device, self.dtype)
143
+
144
+ b, c, h, w = x.shape
145
+ patch_h, patch_w = h // 14, w // 14
146
+
147
+ embeddings = self.model.forward_features(x)['x_norm_patchtokens']
148
+ embeddings = rearrange(embeddings, 'b (h w) c -> b h w c', h = patch_h, w = patch_w)
149
+
150
+ return rearrange(embeddings, 'b h w c -> b c h w')
151
+
152
+ def preprocess(self, x):
153
+ '''
154
+ Resize to the fixed DINOv2 input size, map from (-1, 1) to [0, 1], then apply ImageNet normalization.
155
+ '''
156
+ x = torch.nn.functional.interpolate(
157
+ x,
158
+ size=(self.size, self.size),
159
+ mode='bicubic',
160
+ align_corners=True,
161
+ antialias=self.antialias,
162
+ )
163
+
164
+ x = (x + 1.0) / 2.0
165
+ # renormalize according to dino
166
+ mean = self.mean.view(1, 3, 1, 1).to(x.device)
167
+ std = self.std.view(1, 3, 1, 1).to(x.device)
168
+ x = (x - mean) / std
169
+
170
+ return x
171
+
172
+ def to(self, device, dtype=None):
173
+ if dtype is not None:
174
+ self.dtype = dtype
175
+ self.model.to(device, dtype)
176
+ self.mean.to(device, dtype)
177
+ self.std.to(device, dtype)
178
+ else:
179
+ self.model.to(device)
180
+ self.mean.to(device)
181
+ self.std.to(device)
182
+ return self
183
+
184
+ def __call__(self, x, **kwargs):
185
+ return self.encoder(x, **kwargs)
186
+
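A sketch of exercising the DINOv2 prior on its own; the class above downloads the hub weights and defaults to CUDA, and input is expected channels-first in (-1, 1):

import torch
# DINOv2_Encoder is defined directly above in this file.

encoder = DINOv2_Encoder(size=448)             # the pipeline below uses size=672
x = torch.rand(1, 3, 512, 512).cuda() * 2 - 1  # RGB in (-1, 1)
feats = encoder(x)                             # [1, 1024, 32, 32] for dinov2_vitl14 at 448 px (448 / 14 = 32)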
187
+ class StableNormalPipeline(StableDiffusionControlNetPipeline):
188
+ """ Pipeline for monocular normals estimation using the Marigold method: https://marigoldmonodepth.github.io.
189
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.
190
+
191
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
192
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
193
+
194
+ The pipeline also inherits the following loading methods:
195
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
196
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
197
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
198
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
199
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
200
+
201
+ Args:
202
+ vae ([`AutoencoderKL`]):
203
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
204
+ text_encoder ([`~transformers.CLIPTextModel`]):
205
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
206
+ tokenizer ([`~transformers.CLIPTokenizer`]):
207
+ A `CLIPTokenizer` to tokenize text.
208
+ unet ([`UNet2DConditionModel`]):
209
+ A `UNet2DConditionModel` to denoise the encoded image latents.
210
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
211
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
212
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
213
+ additional conditioning.
214
+ scheduler ([`SchedulerMixin`]):
215
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
216
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
217
+ safety_checker ([`StableDiffusionSafetyChecker`]):
218
+ Classification module that estimates whether generated images could be considered offensive or harmful.
219
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
220
+ about a model's potential harms.
221
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
222
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
223
+ """
224
+
225
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
226
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
227
+ _exclude_from_cpu_offload = ["safety_checker"]
228
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
229
+
230
+
231
+
232
+ def __init__(
233
+ self,
234
+ vae: AutoencoderKL,
235
+ text_encoder: CLIPTextModel,
236
+ tokenizer: CLIPTokenizer,
237
+ unet: UNet2DConditionModel,
238
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]],
239
+ dino_controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]],
240
+ scheduler: Union[DDIMScheduler],
241
+ safety_checker: StableDiffusionSafetyChecker,
242
+ feature_extractor: CLIPImageProcessor,
243
+ image_encoder: CLIPVisionModelWithProjection = None,
244
+ requires_safety_checker: bool = True,
245
+ default_denoising_steps: Optional[int] = 10,
246
+ default_processing_resolution: Optional[int] = 768,
247
+ prompt="The normal map",
248
+ empty_text_embedding=None,
249
+ ):
250
+ super().__init__(
251
+ vae,
252
+ text_encoder,
253
+ tokenizer,
254
+ unet,
255
+ controlnet,
256
+ scheduler,
257
+ safety_checker,
258
+ feature_extractor,
259
+ image_encoder,
260
+ requires_safety_checker,
261
+ )
262
+
263
+ self.register_modules(
264
+ dino_controlnet=dino_controlnet,
265
+ )
266
+
267
+ self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
268
+ self.dino_image_processor = lambda x: x / 127.5 - 1.0
269
+
270
+ self.default_denoising_steps = default_denoising_steps
271
+ self.default_processing_resolution = default_processing_resolution
272
+ self.prompt = prompt
273
+ self.prompt_embeds = None
274
+ self.empty_text_embedding = empty_text_embedding
275
+ self.prior = DINOv2_Encoder(size=672)
276
+
277
+ def check_inputs(
278
+ self,
279
+ image: PipelineImageInput,
280
+ num_inference_steps: int,
281
+ ensemble_size: int,
282
+ processing_resolution: int,
283
+ resample_method_input: str,
284
+ resample_method_output: str,
285
+ batch_size: int,
286
+ ensembling_kwargs: Optional[Dict[str, Any]],
287
+ latents: Optional[torch.Tensor],
288
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]],
289
+ output_type: str,
290
+ output_uncertainty: bool,
291
+ ) -> int:
292
+ if num_inference_steps is None:
293
+ raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.")
294
+ if num_inference_steps < 1:
295
+ raise ValueError("`num_inference_steps` must be positive.")
296
+ if ensemble_size < 1:
297
+ raise ValueError("`ensemble_size` must be positive.")
298
+ if ensemble_size == 2:
299
+ logger.warning(
300
+ "`ensemble_size` == 2 results are similar to no ensembling (1); "
301
+ "consider increasing the value to at least 3."
302
+ )
303
+ if ensemble_size == 1 and output_uncertainty:
304
+ raise ValueError(
305
+ "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` "
306
+ "greater than 1."
307
+ )
308
+ if processing_resolution is None:
309
+ raise ValueError(
310
+ "`processing_resolution` is not specified and could not be resolved from the model config."
311
+ )
312
+ if processing_resolution < 0:
313
+ raise ValueError(
314
+ "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
315
+ "downsampled processing."
316
+ )
317
+ if processing_resolution % self.vae_scale_factor != 0:
318
+ raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
319
+ if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
320
+ raise ValueError(
321
+ "`resample_method_input` takes string values compatible with PIL library: "
322
+ "nearest, nearest-exact, bilinear, bicubic, area."
323
+ )
324
+ if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
325
+ raise ValueError(
326
+ "`resample_method_output` takes string values compatible with PIL library: "
327
+ "nearest, nearest-exact, bilinear, bicubic, area."
328
+ )
329
+ if batch_size < 1:
330
+ raise ValueError("`batch_size` must be positive.")
331
+ if output_type not in ["pt", "np"]:
332
+ raise ValueError("`output_type` must be one of `pt` or `np`.")
333
+ if latents is not None and generator is not None:
334
+ raise ValueError("`latents` and `generator` cannot be used together.")
335
+ if ensembling_kwargs is not None:
336
+ if not isinstance(ensembling_kwargs, dict):
337
+ raise ValueError("`ensembling_kwargs` must be a dictionary.")
338
+ if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("closest", "mean"):
339
+ raise ValueError("`ensembling_kwargs['reduction']` can be either `'closest'` or `'mean'`.")
340
+
341
+ # image checks
342
+ num_images = 0
343
+ W, H = None, None
344
+ if not isinstance(image, list):
345
+ image = [image]
346
+ for i, img in enumerate(image):
347
+ if isinstance(img, np.ndarray) or torch.is_tensor(img):
348
+ if img.ndim not in (2, 3, 4):
349
+ raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
350
+ H_i, W_i = img.shape[-2:]
351
+ N_i = 1
352
+ if img.ndim == 4:
353
+ N_i = img.shape[0]
354
+ elif isinstance(img, Image.Image):
355
+ W_i, H_i = img.size
356
+ N_i = 1
357
+ else:
358
+ raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
359
+ if W is None:
360
+ W, H = W_i, H_i
361
+ elif (W, H) != (W_i, H_i):
362
+ raise ValueError(
363
+ f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
364
+ )
365
+ num_images += N_i
366
+
367
+ # latents checks
368
+ if latents is not None:
369
+ if not torch.is_tensor(latents):
370
+ raise ValueError("`latents` must be a torch.Tensor.")
371
+ if latents.dim() != 4:
372
+ raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.")
373
+
374
+ if processing_resolution > 0:
375
+ max_orig = max(H, W)
376
+ new_H = H * processing_resolution // max_orig
377
+ new_W = W * processing_resolution // max_orig
378
+ if new_H == 0 or new_W == 0:
379
+ raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")
380
+ W, H = new_W, new_H
381
+ w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor
382
+ h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor
383
+ shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w)
384
+
385
+ if latents.shape != shape_expected:
386
+ raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.")
387
+
388
+ # generator checks
389
+ if generator is not None:
390
+ if isinstance(generator, list):
391
+ if len(generator) != num_images * ensemble_size:
392
+ raise ValueError(
393
+ "The number of generators must match the total number of ensemble members for all input images."
394
+ )
395
+ if not all(g.device.type == generator[0].device.type for g in generator):
396
+ raise ValueError("`generator` device placement is not consistent in the list.")
397
+ elif not isinstance(generator, torch.Generator):
398
+ raise ValueError(f"Unsupported generator type: {type(generator)}.")
399
+
400
+ return num_images
401
+
402
+ def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
403
+ if not hasattr(self, "_progress_bar_config"):
404
+ self._progress_bar_config = {}
405
+ elif not isinstance(self._progress_bar_config, dict):
406
+ raise ValueError(
407
+ f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
408
+ )
409
+
410
+ progress_bar_config = dict(**self._progress_bar_config)
411
+ progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
412
+ progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
413
+ if iterable is not None:
414
+ return tqdm(iterable, **progress_bar_config)
415
+ elif total is not None:
416
+ return tqdm(total=total, **progress_bar_config)
417
+ else:
418
+ raise ValueError("Either `total` or `iterable` has to be defined.")
419
+
420
+ @torch.no_grad()
421
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
422
+ def __call__(
423
+ self,
424
+ image: PipelineImageInput,
425
+ prompt: Union[str, List[str]] = None,
426
+ negative_prompt: Optional[Union[str, List[str]]] = None,
427
+ num_inference_steps: Optional[int] = None,
428
+ ensemble_size: int = 1,
429
+ processing_resolution: Optional[int] = None,
430
+ match_input_resolution: bool = True,
431
+ resample_method_input: str = "bilinear",
432
+ resample_method_output: str = "bilinear",
433
+ batch_size: int = 1,
434
+ ensembling_kwargs: Optional[Dict[str, Any]] = None,
435
+ latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
436
+ prompt_embeds: Optional[torch.Tensor] = None,
437
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
438
+ num_images_per_prompt: Optional[int] = 1,
439
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
440
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
441
+ output_type: str = "np",
442
+ output_uncertainty: bool = False,
443
+ output_latent: bool = False,
444
+ return_dict: bool = True,
445
+ ):
446
+ """
447
+ Function invoked when calling the pipeline.
448
+
449
+ Args:
450
+ image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
451
+ `List[torch.Tensor]`): An input image or images used as an input for the normals estimation task. For
452
+ arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible
453
+ by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or
454
+ three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the
455
+ same width and height.
456
+ num_inference_steps (`int`, *optional*, defaults to `None`):
457
+ Number of denoising diffusion steps during inference. The default value `None` results in automatic
458
+ selection. The number of steps should be at least 10 with the full Marigold models, and between 1 and 4
459
+ for Marigold-LCM models.
460
+ ensemble_size (`int`, defaults to `1`):
461
+ Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for
462
+ faster inference.
463
+ processing_resolution (`int`, *optional*, defaults to `None`):
464
+ Effective processing resolution. When set to `0`, matches the larger input image dimension. This
465
+ produces crisper predictions, but may also lead to the overall loss of global context. The default
466
+ value `None` resolves to the optimal value from the model config.
467
+ match_input_resolution (`bool`, *optional*, defaults to `True`):
468
+ When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer
469
+ side of the output will equal `processing_resolution`.
470
+ resample_method_input (`str`, *optional*, defaults to `"bilinear"`):
471
+ Resampling method used to resize input images to `processing_resolution`. The accepted values are:
472
+ `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
473
+ resample_method_output (`str`, *optional*, defaults to `"bilinear"`):
474
+ Resampling method used to resize output predictions to match the input resolution. The accepted values
475
+ are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
476
+ batch_size (`int`, *optional*, defaults to `1`):
477
+ Batch size; only matters when setting `ensemble_size` or passing a tensor of images.
478
+ ensembling_kwargs (`dict`, *optional*, defaults to `None`):
479
+ Extra dictionary with arguments for precise ensembling control. The following options are available:
480
+ - reduction (`str`, *optional*, defaults to `"closest"`): Defines the ensembling function applied in
481
+ every pixel location, can be either `"closest"` or `"mean"`.
482
+ latents (`torch.Tensor`, *optional*, defaults to `None`):
483
+ Latent noise tensors to replace the random initialization. These can be taken from the previous
484
+ function call's output.
485
+ generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`):
486
+ Random number generator object to ensure reproducibility.
487
+ output_type (`str`, *optional*, defaults to `"np"`):
488
+ Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted
489
+ values are: `"np"` (numpy array) or `"pt"` (torch tensor).
490
+ output_uncertainty (`bool`, *optional*, defaults to `False`):
491
+ When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that
492
+ the `ensemble_size` argument is set to a value above 2.
493
+ output_latent (`bool`, *optional*, defaults to `False`):
494
+ When enabled, the output's `latent` field contains the latent codes corresponding to the predictions
495
+ within the ensemble. These codes can be saved, modified, and used for subsequent calls with the
496
+ `latents` argument.
497
+ return_dict (`bool`, *optional*, defaults to `True`):
498
+ Whether or not to return a [`StableNormalOutput`] instead of a plain tuple.
499
+
500
+ Examples:
501
+
502
+ Returns:
503
+ [`StableNormalOutput`] or `tuple`:
504
+ If `return_dict` is `True`, [`StableNormalOutput`] is returned, otherwise a
505
+ `tuple` is returned where the first element is the prediction, the second element is the uncertainty
506
+ (or `None`), and the third is the latent (or `None`).
507
+ """
508
+
509
+ # 0. Resolving variables.
510
+ device = self._execution_device
511
+ dtype = self.dtype
512
+
513
+ # Model-specific optimal default values leading to fast and reasonable results.
514
+ if num_inference_steps is None:
515
+ num_inference_steps = self.default_denoising_steps
516
+ if processing_resolution is None:
517
+ processing_resolution = self.default_processing_resolution
518
+
519
+
520
+ image, padding, original_resolution = self.image_processor.preprocess(
521
+ image, processing_resolution, resample_method_input, device, dtype
522
+ ) # [N,3,PPH,PPW]
523
+
524
+ image_latent, gaus_noise = self.prepare_latents(
525
+ image, latents, generator, ensemble_size, batch_size
526
+ ) # [N,4,h,w], [N,4,h,w]
527
+
528
+ # 0. Obtain the x_start latent.
529
+ predictor = self.x_start_pipeline(image, latents=gaus_noise,
530
+ processing_resolution=processing_resolution, skip_preprocess=True)
531
+ x_start_latent = predictor.latent
532
+
533
+ # 1. Check inputs.
534
+ num_images = self.check_inputs(
535
+ image,
536
+ num_inference_steps,
537
+ ensemble_size,
538
+ processing_resolution,
539
+ resample_method_input,
540
+ resample_method_output,
541
+ batch_size,
542
+ ensembling_kwargs,
543
+ latents,
544
+ generator,
545
+ output_type,
546
+ output_uncertainty,
547
+ )
548
+
549
+
550
+ # 2. Prepare empty text conditioning.
551
+ # Model invocation: self.tokenizer, self.text_encoder.
552
+ if self.empty_text_embedding is None:
553
+ prompt = ""
554
+ text_inputs = self.tokenizer(
555
+ prompt,
556
+ padding="do_not_pad",
557
+ max_length=self.tokenizer.model_max_length,
558
+ truncation=True,
559
+ return_tensors="pt",
560
+ )
561
+ text_input_ids = text_inputs.input_ids.to(device)
562
+ self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024]
563
+
564
+
565
+
566
+ # 3. Prepare the prompt embeddings.
567
+ if self.prompt_embeds is None:
568
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
569
+ self.prompt,
570
+ device,
571
+ num_images_per_prompt,
572
+ False,
573
+ negative_prompt,
574
+ prompt_embeds=prompt_embeds,
575
+ negative_prompt_embeds=None,
576
+ lora_scale=None,
577
+ clip_skip=None,
578
+ )
579
+ self.prompt_embeds = prompt_embeds
580
+ self.negative_prompt_embeds = negative_prompt_embeds
581
+
582
+
583
+
584
+ # 5. Obtain the DINO guidance features.
585
+ ## TODO different case-1
586
+ dino_features = self.prior(image)
587
+ dino_features = self.dino_controlnet.dino_controlnet_cond_embedding(dino_features)
588
+ dino_features = self.match_noisy(dino_features, x_start_latent)
589
+
590
+ del (
591
+ image,
592
+ )
593
+
594
+ # 7. Denoising loop, using the heuristic sampling proposed by Ye.
595
+
596
+ t_start = self.x_start_pipeline.t_start
597
+ self.scheduler.set_timesteps(num_inference_steps, t_start=t_start, device=device)
598
+
599
+ cond_scale = controlnet_conditioning_scale
600
+ pred_latent = x_start_latent
601
+
602
+ cur_step = 0
603
+
604
+ # dino controlnet
605
+ dino_down_block_res_samples, dino_mid_block_res_sample = self.dino_controlnet(
606
+ dino_features.detach(),
607
+ 0, # timestep fixed to 0; does not depend on the denoising step
608
+ encoder_hidden_states=self.prompt_embeds,
609
+ conditioning_scale=cond_scale,
610
+ guess_mode=False,
611
+ return_dict=False,
612
+ )
613
+ assert dino_mid_block_res_sample is None
614
+
615
+ pred_latents = []
616
+
617
+ last_pred_latent = pred_latent
618
+ for t, prev_t in self.progress_bar(zip(self.scheduler.timesteps, self.scheduler.prev_timesteps), leave=False, desc="Diffusion steps..."):
619
+
620
+ _dino_down_block_res_samples = list(dino_down_block_res_samples) # fresh copy each step; the UNet forward pops from this list
621
+
622
+ # controlnet
623
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
624
+ image_latent.detach(),
625
+ t,
626
+ encoder_hidden_states=self.prompt_embeds,
627
+ conditioning_scale=cond_scale,
628
+ guess_mode=False,
629
+ return_dict=False,
630
+ )
631
+
632
+ # SG-DRN
633
+ noise = self.dino_unet_forward(
634
+ self.unet,
635
+ pred_latent,
636
+ t,
637
+ encoder_hidden_states=self.prompt_embeds,
638
+ down_block_additional_residuals=down_block_res_samples,
639
+ mid_block_additional_residual=mid_block_res_sample,
640
+ dino_down_block_additional_residuals= _dino_down_block_res_samples,
641
+ return_dict=False,
642
+ )[0] # [B,4,h,w]
643
+
644
+ pred_latents.append(noise)
645
+ # ddim steps
646
+ out = self.scheduler.step(
647
+ noise, t, prev_t, pred_latent, gaus_noise=gaus_noise, generator=generator, cur_step=cur_step + 1 # NOTE: cur_step refers to the next step
648
+ ) # [B,4,h,w]
649
+ pred_latent = out.prev_sample
650
+
651
+ cur_step += 1
652
+
653
+ del (
654
+ image_latent,
655
+ dino_features,
656
+ )
657
+ pred_latent = pred_latents[-1] # use the final x0 prediction
658
+
659
+ # decoder
660
+ prediction = self.decode_prediction(pred_latent)
661
+ prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,3,PH,PW]
663
+
664
+ if match_input_resolution:
665
+ prediction = self.image_processor.resize_antialias(
666
+ prediction, original_resolution, resample_method_output, is_aa=False
667
+ ) # [N,3,H,W]
668
+
673
+ prediction = self.normalize_normals(prediction) # [N,3,H,W]
674
+
675
+ if output_type == "np":
676
+ prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,3]
677
+ prediction = prediction.clip(min=-1, max=1)
678
+
679
+ # 11. Offload all models
680
+ self.maybe_free_model_hooks()
681
+
682
+ return StableNormalOutput(
683
+ prediction=prediction,
684
+ latent=pred_latent,
685
+ gaus_noise=gaus_noise
686
+ )
687
+
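A hedged invocation sketch for the call path above, assuming a loaded `pipe` as in the earlier loading sketch and any RGB input image (the file name is a placeholder):

```py
>>> from PIL import Image

>>> image = Image.open("input.jpg").convert("RGB")  # placeholder file name
>>> out = pipe(image, num_inference_steps=10, processing_resolution=768)
>>> normals = out.prediction  # [N,H,W,3] numpy array in [-1, 1] for output_type="np"
```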
688
+ # Copied from diffusers.pipelines.marigold.pipeline_marigold_depth.MarigoldDepthPipeline.prepare_latents
689
+ def prepare_latents(
690
+ self,
691
+ image: torch.Tensor,
692
+ latents: Optional[torch.Tensor],
693
+ generator: Optional[torch.Generator],
694
+ ensemble_size: int,
695
+ batch_size: int,
696
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
697
+ def retrieve_latents(encoder_output):
698
+ if hasattr(encoder_output, "latent_dist"):
699
+ return encoder_output.latent_dist.mode()
700
+ elif hasattr(encoder_output, "latents"):
701
+ return encoder_output.latents
702
+ else:
703
+ raise AttributeError("Could not access latents of provided encoder_output")
704
+
705
+
706
+
707
+ image_latent = torch.cat(
708
+ [
709
+ retrieve_latents(self.vae.encode(image[i : i + batch_size]))
710
+ for i in range(0, image.shape[0], batch_size)
711
+ ],
712
+ dim=0,
713
+ ) # [N,4,h,w]
714
+ image_latent = image_latent * self.vae.config.scaling_factor
715
+ image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w]
716
+
717
+ pred_latent = latents
718
+ if pred_latent is None:
719
+
720
+
721
+ pred_latent = randn_tensor(
722
+ image_latent.shape,
723
+ generator=generator,
724
+ device=image_latent.device,
725
+ dtype=image_latent.dtype,
726
+ ) # [N*E,4,h,w]
727
+
728
+ return image_latent, pred_latent
729
+
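The ensemble expansion above relies on `repeat_interleave`, which repeats each image's latent `ensemble_size` times along the batch dimension; a toy check with stand-in values:

```py
>>> import torch

>>> x = torch.tensor([0, 1]).view(2, 1, 1, 1)  # stand-in for N=2 image latents
>>> x.repeat_interleave(3, dim=0).flatten()    # ensemble_size E=3 -> N*E=6
tensor([0, 0, 0, 1, 1, 1])
```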
730
+ def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
731
+ if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
732
+ raise ValueError(
733
+ f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
734
+ )
735
+
736
+ prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W]
737
+
738
+ return prediction # [B,3,H,W]
739
+
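The division by `scaling_factor` above undoes the multiplication applied in `prepare_latents`; a round-trip check with the usual Stable Diffusion default of 0.18215 (assumed here purely for illustration):

```py
>>> import torch

>>> scale = 0.18215                     # typical SD VAE scaling factor
>>> latent = torch.randn(1, 4, 96, 96)
>>> torch.allclose((latent * scale) / scale, latent, atol=1e-6)
True
```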
740
+ @staticmethod
741
+ def normalize_normals(normals: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
742
+ if normals.dim() != 4 or normals.shape[1] != 3:
743
+ raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
744
+
745
+ norm = torch.norm(normals, dim=1, keepdim=True)
746
+ normals /= norm.clamp(min=eps) # in-place: scale each per-pixel vector to unit length
747
+
748
+ return normals
749
+
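A quick numeric check of the normalization above with a toy per-pixel vector: (3, 0, 4) has norm 5, so it maps to (0.6, 0.0, 0.8):

```py
>>> import torch

>>> n = torch.tensor([3.0, 0.0, 4.0]).view(1, 3, 1, 1)
>>> (n / n.norm(dim=1, keepdim=True).clamp(min=1e-6)).flatten()
tensor([0.6000, 0.0000, 0.8000])
```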
750
+ @staticmethod
751
+ def match_noisy(dino, noisy):
752
+ _, __, dino_h, dino_w = dino.shape
753
+ _, __, h, w = noisy.shape
754
+
755
+ if h == dino_h and w == dino_w:
756
+ return dino
757
+ else:
758
+ return F.interpolate(dino, (h, w), mode='bilinear')
759
+
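`match_noisy` only resizes the DINO feature map when its spatial size differs from the noisy latent's; a shape check with assumed toy sizes:

```py
>>> import torch
>>> import torch.nn.functional as F

>>> dino = torch.randn(1, 320, 84, 84)  # assumed feature-map size
>>> F.interpolate(dino, (96, 96), mode="bilinear").shape
torch.Size([1, 320, 96, 96])
```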
760
+
761
+
762
+
763
+
764
+
765
+
766
+
767
+
768
+
769
+ @staticmethod
770
+ def dino_unet_forward(
771
+ self, # NOTE: repurposed so that `self` is the UNet instance
772
+ sample: torch.Tensor,
773
+ timestep: Union[torch.Tensor, float, int],
774
+ encoder_hidden_states: torch.Tensor,
775
+ class_labels: Optional[torch.Tensor] = None,
776
+ timestep_cond: Optional[torch.Tensor] = None,
777
+ attention_mask: Optional[torch.Tensor] = None,
778
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
779
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
780
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
781
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
782
+ dino_down_block_additional_residuals: Optional[torch.Tensor] = None,
783
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
784
+ encoder_attention_mask: Optional[torch.Tensor] = None,
785
+ return_dict: bool = True,
786
+ ) -> Union[UNet2DConditionOutput, Tuple]:
787
+ r"""
788
+ The [`UNet2DConditionModel`] forward method.
789
+
790
+ Args:
791
+ sample (`torch.Tensor`):
792
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
793
+ timestep (`torch.Tensor` or `float` or `int`): The number of timesteps to denoise an input.
794
+ encoder_hidden_states (`torch.Tensor`):
795
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
796
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
797
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
798
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
799
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
800
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
801
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
802
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
803
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
804
+ negative values to the attention scores corresponding to "discard" tokens.
805
+ cross_attention_kwargs (`dict`, *optional*):
806
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
807
+ `self.processor` in
808
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
809
+ added_cond_kwargs: (`dict`, *optional*):
810
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
811
+ are passed along to the UNet blocks.
812
+ down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
813
+ A tuple of tensors that if specified are added to the residuals of down unet blocks.
814
+ mid_block_additional_residual: (`torch.Tensor`, *optional*):
815
+ A tensor that if specified is added to the residual of the middle unet block.
816
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
817
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
818
+ encoder_attention_mask (`torch.Tensor`):
819
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
820
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
821
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
822
+ return_dict (`bool`, *optional*, defaults to `True`):
823
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
824
+ tuple.
825
+
826
+ Returns:
827
+ [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
828
+ If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
829
+ otherwise a `tuple` is returned where the first element is the sample tensor.
830
+ """
831
+ # By default samples have to be AT least a multiple of the overall upsampling factor.
832
+ # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
833
+ # However, the upsampling interpolation output size can be forced to fit any upsampling size
834
+ # on the fly if necessary.
835
+
836
+
837
+ default_overall_up_factor = 2**self.num_upsamplers
838
+
839
+ # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
840
+ forward_upsample_size = False
841
+ upsample_size = None
842
+
843
+ for dim in sample.shape[-2:]:
844
+ if dim % default_overall_up_factor != 0:
845
+ # Forward upsample size to force interpolation output size.
846
+ forward_upsample_size = True
847
+ break
848
+
849
+ # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
850
+ # expects mask of shape:
851
+ # [batch, key_tokens]
852
+ # adds singleton query_tokens dimension:
853
+ # [batch, 1, key_tokens]
854
+ # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
855
+ # [batch, heads, query_tokens, key_tokens] (e.g. torch sdp attn)
856
+ # [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
857
+ if attention_mask is not None:
858
+ # assume that mask is expressed as:
859
+ # (1 = keep, 0 = discard)
860
+ # convert mask into a bias that can be added to attention scores:
861
+ # (keep = +0, discard = -10000.0)
862
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
863
+ attention_mask = attention_mask.unsqueeze(1)
864
+
865
+ # convert encoder_attention_mask to a bias the same way we do for attention_mask
866
+ if encoder_attention_mask is not None:
867
+ encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
868
+ encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
869
+
870
+ # 0. center input if necessary
871
+ if self.config.center_input_sample:
872
+ sample = 2 * sample - 1.0
873
+
874
+ # 1. time
875
+ t_emb = self.get_time_embed(sample=sample, timestep=timestep)
876
+ emb = self.time_embedding(t_emb, timestep_cond)
877
+ aug_emb = None
878
+
879
+ class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
880
+ if class_emb is not None:
881
+ if self.config.class_embeddings_concat:
882
+ emb = torch.cat([emb, class_emb], dim=-1)
883
+ else:
884
+ emb = emb + class_emb
885
+
886
+ aug_emb = self.get_aug_embed(
887
+ emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
888
+ )
889
+ if self.config.addition_embed_type == "image_hint":
890
+ aug_emb, hint = aug_emb
891
+ sample = torch.cat([sample, hint], dim=1)
892
+
893
+ emb = emb + aug_emb if aug_emb is not None else emb
894
+
895
+ if self.time_embed_act is not None:
896
+ emb = self.time_embed_act(emb)
897
+
898
+ encoder_hidden_states = self.process_encoder_hidden_states(
899
+ encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
900
+ )
901
+
902
+ # 2. pre-process
903
+ sample = self.conv_in(sample)
904
+
905
+ # 2.5 GLIGEN position net
906
+ if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
907
+ cross_attention_kwargs = cross_attention_kwargs.copy()
908
+ gligen_args = cross_attention_kwargs.pop("gligen")
909
+ cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
910
+
911
+ # 3. down
912
+ # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
913
+ # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
914
+ if cross_attention_kwargs is not None:
915
+ cross_attention_kwargs = cross_attention_kwargs.copy()
916
+ lora_scale = cross_attention_kwargs.pop("scale", 1.0)
917
+ else:
918
+ lora_scale = 1.0
919
+
920
+ if USE_PEFT_BACKEND:
921
+ # weight the lora layers by setting `lora_scale` for each PEFT layer
922
+ scale_lora_layers(self, lora_scale)
923
+
924
+ is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
925
+ # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
926
+ is_adapter = down_intrablock_additional_residuals is not None
927
+ # maintain backward compatibility for legacy usage, where
928
+ # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
929
+ # but can only use one or the other
930
+ if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
931
+ deprecate(
932
+ "T2I should not use down_block_additional_residuals",
933
+ "1.3.0",
934
+ "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
935
+ and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
936
+ for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
937
+ standard_warn=False,
938
+ )
939
+ down_intrablock_additional_residuals = down_block_additional_residuals
940
+ is_adapter = True
941
+
942
+
943
+
944
+ def residual_downforward(
945
+ self, hidden_states: torch.Tensor, temb: Optional[torch.Tensor] = None,
946
+ additional_residuals: Optional[torch.Tensor] = None,
947
+ *args, **kwargs,
948
+ ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]:
949
+ if len(args) > 0 or kwargs.get("scale", None) is not None:
950
+ deprecation_message = "The `scale` argument is deprecated and will be ignored. Please remove it, as passing it will raise an error in the future. `scale` should directly be passed while calling the underlying pipeline component i.e., via `cross_attention_kwargs`."
951
+ deprecate("scale", "1.0.0", deprecation_message)
952
+
953
+ output_states = ()
954
+
955
+ for resnet in self.resnets:
956
+ if self.training and self.gradient_checkpointing:
957
+
958
+ def create_custom_forward(module):
959
+ def custom_forward(*inputs):
960
+ return module(*inputs)
961
+
962
+ return custom_forward
963
+
964
+ if is_torch_version(">=", "1.11.0"):
965
+ hidden_states = torch.utils.checkpoint.checkpoint(
966
+ create_custom_forward(resnet), hidden_states, temb, use_reentrant=False
967
+ )
968
+ else:
969
+ hidden_states = torch.utils.checkpoint.checkpoint(
970
+ create_custom_forward(resnet), hidden_states, temb
971
+ )
972
+ else:
973
+ hidden_states = resnet(hidden_states, temb)
974
+ hidden_states += additional_residuals.pop(0)
975
+
976
+
977
+ output_states = output_states + (hidden_states,)
978
+
979
+ if self.downsamplers is not None:
980
+ for downsampler in self.downsamplers:
981
+ hidden_states = downsampler(hidden_states)
982
+ hidden_states += additional_residuals.pop(0)
983
+
984
+ output_states = output_states + (hidden_states,)
985
+
986
+ return hidden_states, output_states
987
+
988
+
989
+ def residual_blockforward(
990
+ self, ## NOTE: repurposed so that `self` is a UNet down block
991
+ hidden_states: torch.Tensor,
992
+ temb: Optional[torch.Tensor] = None,
993
+ encoder_hidden_states: Optional[torch.Tensor] = None,
994
+ attention_mask: Optional[torch.Tensor] = None,
995
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
996
+ encoder_attention_mask: Optional[torch.Tensor] = None,
997
+ additional_residuals: Optional[torch.Tensor] = None,
998
+ ) -> Tuple[torch.Tensor, Tuple[torch.Tensor, ...]]:
999
+ if cross_attention_kwargs is not None:
1000
+ if cross_attention_kwargs.get("scale", None) is not None:
1001
+ logger.warning("Passing `scale` to `cross_attention_kwargs` is deprecated. `scale` will be ignored.")
1002
+
1003
+
1004
+
1005
+ output_states = ()
1006
+
1007
+ blocks = list(zip(self.resnets, self.attentions))
1008
+
1009
+ for i, (resnet, attn) in enumerate(blocks):
1010
+ if self.training and self.gradient_checkpointing:
1011
+
1012
+ def create_custom_forward(module, return_dict=None):
1013
+ def custom_forward(*inputs):
1014
+ if return_dict is not None:
1015
+ return module(*inputs, return_dict=return_dict)
1016
+ else:
1017
+ return module(*inputs)
1018
+
1019
+ return custom_forward
1020
+
1021
+ ckpt_kwargs: Dict[str, Any] = {"use_reentrant": False} if is_torch_version(">=", "1.11.0") else {}
1022
+ hidden_states = torch.utils.checkpoint.checkpoint(
1023
+ create_custom_forward(resnet),
1024
+ hidden_states,
1025
+ temb,
1026
+ **ckpt_kwargs,
1027
+ )
1028
+ hidden_states = attn(
1029
+ hidden_states,
1030
+ encoder_hidden_states=encoder_hidden_states,
1031
+ cross_attention_kwargs=cross_attention_kwargs,
1032
+ attention_mask=attention_mask,
1033
+ encoder_attention_mask=encoder_attention_mask,
1034
+ return_dict=False,
1035
+ )[0]
1036
+ else:
1037
+ hidden_states = resnet(hidden_states, temb)
1038
+ hidden_states = attn(
1039
+ hidden_states,
1040
+ encoder_hidden_states=encoder_hidden_states,
1041
+ cross_attention_kwargs=cross_attention_kwargs,
1042
+ attention_mask=attention_mask,
1043
+ encoder_attention_mask=encoder_attention_mask,
1044
+ return_dict=False,
1045
+ )[0]
1046
+
1047
+ hidden_states += additional_residuals.pop(0)
1048
+
1049
+ output_states = output_states + (hidden_states,)
1050
+
1051
+ if self.downsamplers is not None:
1052
+ for downsampler in self.downsamplers:
1053
+ hidden_states = downsampler(hidden_states)
1054
+ hidden_states += additional_residuals.pop(0)
1055
+
1056
+ output_states = output_states + (hidden_states,)
1057
+
1058
+ return hidden_states, output_states
1059
+
1060
+
1061
+ down_intrablock_additional_residuals = dino_down_block_additional_residuals
1062
+
1063
+ sample += down_intrablock_additional_residuals.pop(0)
1064
+ down_block_res_samples = (sample,)
1065
+
1066
+ for downsample_block in self.down_blocks:
1067
+ if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
1068
+
1069
+ sample, res_samples = residual_blockforward(
1070
+ downsample_block,
1071
+ hidden_states=sample,
1072
+ temb=emb,
1073
+ encoder_hidden_states=encoder_hidden_states,
1074
+ attention_mask=attention_mask,
1075
+ cross_attention_kwargs=cross_attention_kwargs,
1076
+ encoder_attention_mask=encoder_attention_mask,
1077
+ additional_residuals=down_intrablock_additional_residuals,
1078
+ )
1079
+
1080
+ else:
1081
+ sample, res_samples = residual_downforward(
1082
+ downsample_block,
1083
+ hidden_states=sample,
1084
+ temb=emb,
1085
+ additional_residuals=down_intrablock_additional_residuals,
1086
+ )
1087
+
1088
+
1089
+ down_block_res_samples += res_samples
1090
+
1091
+
1092
+ if is_controlnet:
1093
+ new_down_block_res_samples = ()
1094
+
1095
+ for down_block_res_sample, down_block_additional_residual in zip(
1096
+ down_block_res_samples, down_block_additional_residuals
1097
+ ):
1098
+ down_block_res_sample = down_block_res_sample + down_block_additional_residual
1099
+ new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample,)
1100
+
1101
+ down_block_res_samples = new_down_block_res_samples
1102
+
1103
+ # 4. mid
1104
+ if self.mid_block is not None:
1105
+ if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
1106
+ sample = self.mid_block(
1107
+ sample,
1108
+ emb,
1109
+ encoder_hidden_states=encoder_hidden_states,
1110
+ attention_mask=attention_mask,
1111
+ cross_attention_kwargs=cross_attention_kwargs,
1112
+ encoder_attention_mask=encoder_attention_mask,
1113
+ )
1114
+ else:
1115
+ sample = self.mid_block(sample, emb)
1116
+
1117
+ # To support T2I-Adapter-XL
1118
+ if (
1119
+ is_adapter
1120
+ and len(down_intrablock_additional_residuals) > 0
1121
+ and sample.shape == down_intrablock_additional_residuals[0].shape
1122
+ ):
1123
+ sample += down_intrablock_additional_residuals.pop(0)
1124
+
1125
+ if is_controlnet:
1126
+ sample = sample + mid_block_additional_residual
1127
+
1128
+ # 5. up
1129
+ for i, upsample_block in enumerate(self.up_blocks):
1130
+ is_final_block = i == len(self.up_blocks) - 1
1131
+
1132
+ res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
1133
+ down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
1134
+
1135
+ # if we have not reached the final block and need to forward the
1136
+ # upsample size, we do it here
1137
+ if not is_final_block and forward_upsample_size:
1138
+ upsample_size = down_block_res_samples[-1].shape[2:]
1139
+
1140
+ if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
1141
+ sample = upsample_block(
1142
+ hidden_states=sample,
1143
+ temb=emb,
1144
+ res_hidden_states_tuple=res_samples,
1145
+ encoder_hidden_states=encoder_hidden_states,
1146
+ cross_attention_kwargs=cross_attention_kwargs,
1147
+ upsample_size=upsample_size,
1148
+ attention_mask=attention_mask,
1149
+ encoder_attention_mask=encoder_attention_mask,
1150
+ )
1151
+ else:
1152
+ sample = upsample_block(
1153
+ hidden_states=sample,
1154
+ temb=emb,
1155
+ res_hidden_states_tuple=res_samples,
1156
+ upsample_size=upsample_size,
1157
+ )
1158
+
1159
+ # 6. post-process
1160
+ if self.conv_norm_out:
1161
+ sample = self.conv_norm_out(sample)
1162
+ sample = self.conv_act(sample)
1163
+ sample = self.conv_out(sample)
1164
+
1165
+ if USE_PEFT_BACKEND:
1166
+ # remove `lora_scale` from each PEFT layer
1167
+ unscale_lora_layers(self, lora_scale)
1168
+
1169
+ if not return_dict:
1170
+ return (sample,)
1171
+
1172
+ return UNet2DConditionOutput(sample=sample)
1173
+
1174
+
1175
+
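The distinctive part of this forward pass is how the DINO residuals are consumed: one tensor is popped from the list and added after every resnet/downsampler call, so the list length must equal the number of down-path feature maps. A toy sketch of the pattern:

```py
>>> feats = [1.0, 2.0, 3.0]                # stand-ins for down-path features
>>> residuals = [0.1, 0.2, 0.3]            # stand-ins for DINO residuals
>>> [f + residuals.pop(0) for f in feats]  # consumed strictly in order
[1.1, 2.2, 3.3]
```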
1176
+ @staticmethod
1177
+ def ensemble_normals(
1178
+ normals: torch.Tensor, output_uncertainty: bool, reduction: str = "closest"
1179
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
1180
+ """
1181
+ Ensembles the normals maps represented by the `normals` tensor with expected shape `(B, 3, H, W)`, where B is
1182
+ the number of ensemble members for a given prediction of size `(H x W)`.
1183
+
1184
+ Args:
1185
+ normals (`torch.Tensor`):
1186
+ Input ensemble normals maps.
1187
+ output_uncertainty (`bool`, *optional*, defaults to `False`):
1188
+ Whether to output uncertainty map.
1189
+ reduction (`str`, *optional*, defaults to `"closest"`):
1190
+ Reduction method used to ensemble aligned predictions. The accepted values are: `"closest"` and
1191
+ `"mean"`.
1192
+
1193
+ Returns:
1194
+ A tensor of aligned and ensembled normals maps with shape `(1, 3, H, W)` and optionally a tensor of
1195
+ uncertainties of shape `(1, 1, H, W)`.
1196
+ """
1197
+ if normals.dim() != 4 or normals.shape[1] != 3:
1198
+ raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
1199
+ if reduction not in ("closest", "mean"):
1200
+ raise ValueError(f"Unrecognized reduction method: {reduction}.")
1201
+
1202
+ mean_normals = normals.mean(dim=0, keepdim=True) # [1,3,H,W]
1203
+ mean_normals = StableNormalPipeline.normalize_normals(mean_normals) # [1,3,H,W]
1204
+
1205
+ sim_cos = (mean_normals * normals).sum(dim=1, keepdim=True) # [E,1,H,W]
1206
+ sim_cos = sim_cos.clamp(-1, 1) # required to avoid NaN in uncertainty with fp16
1207
+
1208
+ uncertainty = None
1209
+ if output_uncertainty:
1210
+ uncertainty = sim_cos.arccos() # [E,1,H,W]
1211
+ uncertainty = uncertainty.mean(dim=0, keepdim=True) / np.pi # [1,1,H,W]
1212
+
1213
+ if reduction == "mean":
1214
+ return mean_normals, uncertainty # [1,3,H,W], [1,1,H,W]
1215
+
1216
+ closest_indices = sim_cos.argmax(dim=0, keepdim=True) # [1,1,H,W]
1217
+ closest_indices = closest_indices.repeat(1, 3, 1, 1) # [1,3,H,W]
1218
+ closest_normals = torch.gather(normals, 0, closest_indices) # [1,3,H,W]
1219
+
1220
+ return closest_normals, uncertainty # [1,3,H,W], [1,1,H,W]
1221
+
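A condensed sketch of the `"closest"` reduction implemented above, with E=4 random toy normals over a 2x2 map: each pixel keeps the ensemble member most aligned with the mean direction:

```py
>>> import torch

>>> normals = torch.randn(4, 3, 2, 2)
>>> normals = normals / normals.norm(dim=1, keepdim=True)
>>> mean = normals.mean(dim=0, keepdim=True)
>>> mean = mean / mean.norm(dim=1, keepdim=True)
>>> cos = (mean * normals).sum(dim=1, keepdim=True)           # [4,1,2,2]
>>> idx = cos.argmax(dim=0, keepdim=True).repeat(1, 3, 1, 1)  # [1,3,2,2]
>>> closest = torch.gather(normals, 0, idx)                   # per-pixel winner
```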
1222
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
1223
+ def retrieve_timesteps(
1224
+ scheduler,
1225
+ num_inference_steps: Optional[int] = None,
1226
+ device: Optional[Union[str, torch.device]] = None,
1227
+ timesteps: Optional[List[int]] = None,
1228
+ sigmas: Optional[List[float]] = None,
1229
+ **kwargs,
1230
+ ):
1231
+ """
1232
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
1233
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
1234
+
1235
+ Args:
1236
+ scheduler (`SchedulerMixin`):
1237
+ The scheduler to get timesteps from.
1238
+ num_inference_steps (`int`):
1239
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
1240
+ must be `None`.
1241
+ device (`str` or `torch.device`, *optional*):
1242
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
1243
+ timesteps (`List[int]`, *optional*):
1244
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
1245
+ `num_inference_steps` and `sigmas` must be `None`.
1246
+ sigmas (`List[float]`, *optional*):
1247
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
1248
+ `num_inference_steps` and `timesteps` must be `None`.
1249
+
1250
+ Returns:
1251
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
1252
+ second element is the number of inference steps.
1253
+ """
1254
+ if timesteps is not None and sigmas is not None:
1255
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
1256
+ if timesteps is not None:
1257
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
1258
+ if not accepts_timesteps:
1259
+ raise ValueError(
1260
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
1261
+ f" timestep schedules. Please check whether you are using the correct scheduler."
1262
+ )
1263
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
1264
+ timesteps = scheduler.timesteps
1265
+ num_inference_steps = len(timesteps)
1266
+ elif sigmas is not None:
1267
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
1268
+ if not accept_sigmas:
1269
+ raise ValueError(
1270
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
1271
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
1272
+ )
1273
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
1274
+ timesteps = scheduler.timesteps
1275
+ num_inference_steps = len(timesteps)
1276
+ else:
1277
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
1278
+ timesteps = scheduler.timesteps
1279
+ return timesteps, num_inference_steps
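A hedged usage sketch for `retrieve_timesteps`, assuming it is importable at module level, with a stock `DDIMScheduler` using its default training timesteps:

```py
>>> from diffusers import DDIMScheduler

>>> scheduler = DDIMScheduler(num_train_timesteps=1000)
>>> timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=10, device="cpu")
>>> len(timesteps), n
(10, 10)
```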
stablenormal/pipeline_yoso_normal.py ADDED
@@ -0,0 +1,727 @@
1
+ # Copyright 2024 Marigold authors, PRS ETH Zurich. All rights reserved.
2
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ # --------------------------------------------------------------------------
16
+ # More information and citation instructions are available on the
17
+ # --------------------------------------------------------------------------
18
+ from dataclasses import dataclass
19
+ from typing import Any, Dict, List, Optional, Tuple, Union
20
+
21
+ import numpy as np
22
+ import torch
23
+ from PIL import Image
24
+ from tqdm.auto import tqdm
25
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
26
+
27
+
28
+ from diffusers.image_processor import PipelineImageInput
29
+ from diffusers.models import (
30
+ AutoencoderKL,
31
+ UNet2DConditionModel,
32
+ ControlNetModel,
33
+ )
34
+ from diffusers.schedulers import (
35
+ DDIMScheduler
36
+ )
37
+
38
+ from diffusers.utils import (
39
+ BaseOutput,
40
+ logging,
41
+ replace_example_docstring,
42
+ )
43
+
44
+
45
+ from diffusers.utils.torch_utils import randn_tensor
46
+ from diffusers.pipelines.controlnet import StableDiffusionControlNetPipeline
47
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
48
+ from diffusers.pipelines.marigold.marigold_image_processing import MarigoldImageProcessor
49
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
50
+
51
+
52
+
53
+
54
+
55
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
56
+
57
+
58
+ EXAMPLE_DOC_STRING = """
59
+ Examples:
60
+ ```py
61
+ >>> import diffusers
62
+ >>> import torch
63
+
64
+ >>> pipe = diffusers.MarigoldNormalsPipeline.from_pretrained(
65
+ ... "prs-eth/marigold-normals-lcm-v0-1", variant="fp16", torch_dtype=torch.float16
66
+ ... ).to("cuda")
67
+
68
+ >>> image = diffusers.utils.load_image("https://marigoldmonodepth.github.io/images/einstein.jpg")
69
+ >>> normals = pipe(image)
70
+
71
+ >>> vis = pipe.image_processor.visualize_normals(normals.prediction)
72
+ >>> vis[0].save("einstein_normals.png")
73
+ ```
74
+ """
75
+
76
+
77
+ @dataclass
78
+ class YosoNormalsOutput(BaseOutput):
79
+ """
80
+ Output class for Marigold monocular normals prediction pipeline.
81
+
82
+ Args:
83
+ prediction (`np.ndarray`, `torch.Tensor`):
84
+ Predicted normals with values in the range [-1, 1]. The shape is always $numimages \times 3 \times height
85
+ \times width$, regardless of whether the images were passed as a 4D array or a list.
86
+ uncertainty (`None`, `np.ndarray`, `torch.Tensor`):
87
+ Uncertainty maps computed from the ensemble, with values in the range [0, 1]. The shape is $numimages
88
+ \times 1 \times height \times width$.
89
+ latent (`None`, `torch.Tensor`):
90
+ Latent features corresponding to the predictions, compatible with the `latents` argument of the pipeline.
91
+ The shape is $numimages * numensemble \times 4 \times latentheight \times latentwidth$.
92
+ """
93
+
94
+ prediction: Union[np.ndarray, torch.Tensor]
95
+ latent: Union[None, torch.Tensor]
96
+ gaus_noise: Union[None, torch.Tensor]
97
+
98
+
99
+ class YOSONormalsPipeline(StableDiffusionControlNetPipeline):
100
+ """ Pipeline for monocular normals estimation using the Marigold method: https://marigoldmonodepth.github.io.
101
+ It is built on Stable Diffusion with ControlNet guidance.
102
+
103
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
104
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
105
+
106
+ The pipeline also inherits the following loading methods:
107
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
108
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
109
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
110
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
111
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
112
+
113
+ Args:
114
+ vae ([`AutoencoderKL`]):
115
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
116
+ text_encoder ([`~transformers.CLIPTextModel`]):
117
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
118
+ tokenizer ([`~transformers.CLIPTokenizer`]):
119
+ A `CLIPTokenizer` to tokenize text.
120
+ unet ([`UNet2DConditionModel`]):
121
+ A `UNet2DConditionModel` to denoise the encoded image latents.
122
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
123
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
124
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
125
+ additional conditioning.
126
+ scheduler ([`SchedulerMixin`]):
127
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
128
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
129
+ safety_checker ([`StableDiffusionSafetyChecker`]):
130
+ Classification module that estimates whether generated images could be considered offensive or harmful.
131
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
132
+ about a model's potential harms.
133
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
134
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
135
+ """
136
+
137
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
138
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
139
+ _exclude_from_cpu_offload = ["safety_checker"]
140
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
141
+
142
+
143
+
144
+ def __init__(
145
+ self,
146
+ vae: AutoencoderKL,
147
+ text_encoder: CLIPTextModel,
148
+ tokenizer: CLIPTokenizer,
149
+ unet: UNet2DConditionModel,
150
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel]],
151
+ scheduler: DDIMScheduler,
152
+ safety_checker: StableDiffusionSafetyChecker,
153
+ feature_extractor: CLIPImageProcessor,
154
+ image_encoder: CLIPVisionModelWithProjection = None,
155
+ requires_safety_checker: bool = True,
156
+ default_denoising_steps: Optional[int] = 1,
157
+ default_processing_resolution: Optional[int] = 768,
158
+ prompt="",
159
+ empty_text_embedding=None,
160
+ t_start: Optional[int] = 401,
161
+ ):
162
+ super().__init__(
163
+ vae,
164
+ text_encoder,
165
+ tokenizer,
166
+ unet,
167
+ controlnet,
168
+ scheduler,
169
+ safety_checker,
170
+ feature_extractor,
171
+ image_encoder,
172
+ requires_safety_checker,
173
+ )
174
+
175
+ # TODO yoso ImageProcessor
176
+ self.image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
177
+ self.control_image_processor = MarigoldImageProcessor(vae_scale_factor=self.vae_scale_factor)
178
+ self.default_denoising_steps = default_denoising_steps
179
+ self.default_processing_resolution = default_processing_resolution
180
+ self.prompt = prompt
181
+ self.prompt_embeds = None
182
+ self.empty_text_embedding = empty_text_embedding
183
+ self.t_start = t_start # target_out latents
184
+
185
+ def check_inputs(
186
+ self,
187
+ image: PipelineImageInput,
188
+ num_inference_steps: int,
189
+ ensemble_size: int,
190
+ processing_resolution: int,
191
+ resample_method_input: str,
192
+ resample_method_output: str,
193
+ batch_size: int,
194
+ ensembling_kwargs: Optional[Dict[str, Any]],
195
+ latents: Optional[torch.Tensor],
196
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]],
197
+ output_type: str,
198
+ output_uncertainty: bool,
199
+ ) -> int:
200
+ if num_inference_steps is None:
201
+ raise ValueError("`num_inference_steps` is not specified and could not be resolved from the model config.")
202
+ if num_inference_steps < 1:
203
+ raise ValueError("`num_inference_steps` must be positive.")
204
+ if ensemble_size < 1:
205
+ raise ValueError("`ensemble_size` must be positive.")
206
+ if ensemble_size == 2:
207
+ logger.warning(
208
+ "`ensemble_size` == 2 results are similar to no ensembling (1); "
209
+ "consider increasing the value to at least 3."
210
+ )
211
+ if ensemble_size == 1 and output_uncertainty:
212
+ raise ValueError(
213
+ "Computing uncertainty by setting `output_uncertainty=True` also requires setting `ensemble_size` "
214
+ "greater than 1."
215
+ )
216
+ if processing_resolution is None:
217
+ raise ValueError(
218
+ "`processing_resolution` is not specified and could not be resolved from the model config."
219
+ )
220
+ if processing_resolution < 0:
221
+ raise ValueError(
222
+ "`processing_resolution` must be non-negative: 0 for native resolution, or any positive value for "
223
+ "downsampled processing."
224
+ )
225
+ if processing_resolution % self.vae_scale_factor != 0:
226
+ raise ValueError(f"`processing_resolution` must be a multiple of {self.vae_scale_factor}.")
227
+ if resample_method_input not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
228
+ raise ValueError(
229
+ "`resample_method_input` takes string values compatible with PIL library: "
230
+ "nearest, nearest-exact, bilinear, bicubic, area."
231
+ )
232
+ if resample_method_output not in ("nearest", "nearest-exact", "bilinear", "bicubic", "area"):
233
+ raise ValueError(
234
+ "`resample_method_output` takes string values compatible with PIL library: "
235
+ "nearest, nearest-exact, bilinear, bicubic, area."
236
+ )
237
+ if batch_size < 1:
238
+ raise ValueError("`batch_size` must be positive.")
239
+ if output_type not in ["pt", "np"]:
240
+ raise ValueError("`output_type` must be one of `pt` or `np`.")
241
+ if latents is not None and generator is not None:
242
+ raise ValueError("`latents` and `generator` cannot be used together.")
243
+ if ensembling_kwargs is not None:
244
+ if not isinstance(ensembling_kwargs, dict):
245
+ raise ValueError("`ensembling_kwargs` must be a dictionary.")
246
+ if "reduction" in ensembling_kwargs and ensembling_kwargs["reduction"] not in ("closest", "mean"):
247
+ raise ValueError("`ensembling_kwargs['reduction']` can be either `'closest'` or `'mean'`.")
248
+
249
+ # image checks
250
+ num_images = 0
251
+ W, H = None, None
252
+ if not isinstance(image, list):
253
+ image = [image]
254
+ for i, img in enumerate(image):
255
+ if isinstance(img, np.ndarray) or torch.is_tensor(img):
256
+ if img.ndim not in (2, 3, 4):
257
+ raise ValueError(f"`image[{i}]` has unsupported dimensions or shape: {img.shape}.")
258
+ H_i, W_i = img.shape[-2:]
259
+ N_i = 1
260
+ if img.ndim == 4:
261
+ N_i = img.shape[0]
262
+ elif isinstance(img, Image.Image):
263
+ W_i, H_i = img.size
264
+ N_i = 1
265
+ else:
266
+ raise ValueError(f"Unsupported `image[{i}]` type: {type(img)}.")
267
+ if W is None:
268
+ W, H = W_i, H_i
269
+ elif (W, H) != (W_i, H_i):
270
+ raise ValueError(
271
+ f"Input `image[{i}]` has incompatible dimensions {(W_i, H_i)} with the previous images {(W, H)}"
272
+ )
273
+ num_images += N_i
274
+
275
+ # latents checks
276
+ if latents is not None:
277
+ if not torch.is_tensor(latents):
278
+ raise ValueError("`latents` must be a torch.Tensor.")
279
+ if latents.dim() != 4:
280
+ raise ValueError(f"`latents` has unsupported dimensions or shape: {latents.shape}.")
281
+
282
+ if processing_resolution > 0:
283
+ max_orig = max(H, W)
284
+ new_H = H * processing_resolution // max_orig
285
+ new_W = W * processing_resolution // max_orig
286
+ if new_H == 0 or new_W == 0:
287
+ raise ValueError(f"Extreme aspect ratio of the input image: [{W} x {H}]")
288
+ W, H = new_W, new_H
289
+ w = (W + self.vae_scale_factor - 1) // self.vae_scale_factor
290
+ h = (H + self.vae_scale_factor - 1) // self.vae_scale_factor
291
+ shape_expected = (num_images * ensemble_size, self.vae.config.latent_channels, h, w)
292
+
293
+ if latents.shape != shape_expected:
294
+ raise ValueError(f"`latents` has unexpected shape={latents.shape} expected={shape_expected}.")
295
+
296
+ # generator checks
297
+ if generator is not None:
298
+ if isinstance(generator, list):
299
+ if len(generator) != num_images * ensemble_size:
300
+ raise ValueError(
301
+ "The number of generators must match the total number of ensemble members for all input images."
302
+ )
303
+ if not all(g.device.type == generator[0].device.type for g in generator):
304
+ raise ValueError("`generator` device placement is not consistent in the list.")
305
+ elif not isinstance(generator, torch.Generator):
306
+ raise ValueError(f"Unsupported generator type: {type(generator)}.")
307
+
308
+ return num_images
309
+
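For reference, a minimal sketch of the latent-shape arithmetic `check_inputs` performs above, assuming the Stable Diffusion defaults of `vae_scale_factor = 8` and 4 latent channels (all concrete numbers below are hypothetical, for illustration only):

```py
# Hypothetical numbers: a 768x512 input at processing_resolution=768.
W, H = 768, 512
processing_resolution, vae_scale_factor, latent_channels = 768, 8, 4

max_orig = max(H, W)
new_W = W * processing_resolution // max_orig            # 768
new_H = H * processing_resolution // max_orig            # 512
w = (new_W + vae_scale_factor - 1) // vae_scale_factor   # 96
h = (new_H + vae_scale_factor - 1) // vae_scale_factor   # 64
# With num_images = 1 and ensemble_size = 1, the expected `latents` shape is:
print((1 * 1, latent_channels, h, w))                    # (1, 4, 64, 96)
```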
310
+ def progress_bar(self, iterable=None, total=None, desc=None, leave=True):
311
+ if not hasattr(self, "_progress_bar_config"):
312
+ self._progress_bar_config = {}
313
+ elif not isinstance(self._progress_bar_config, dict):
314
+ raise ValueError(
315
+ f"`self._progress_bar_config` should be of type `dict`, but is {type(self._progress_bar_config)}."
316
+ )
317
+
318
+ progress_bar_config = dict(**self._progress_bar_config)
319
+ progress_bar_config["desc"] = progress_bar_config.get("desc", desc)
320
+ progress_bar_config["leave"] = progress_bar_config.get("leave", leave)
321
+ if iterable is not None:
322
+ return tqdm(iterable, **progress_bar_config)
323
+ elif total is not None:
324
+ return tqdm(total=total, **progress_bar_config)
325
+ else:
326
+ raise ValueError("Either `total` or `iterable` has to be defined.")
327
+
328
+ @torch.no_grad()
329
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
330
+ def __call__(
331
+ self,
332
+ image: PipelineImageInput,
333
+ prompt: Union[str, List[str]] = None,
334
+ negative_prompt: Optional[Union[str, List[str]]] = None,
335
+ num_inference_steps: Optional[int] = None,
336
+ ensemble_size: int = 1,
337
+ processing_resolution: Optional[int] = None,
338
+ match_input_resolution: bool = True,
339
+ resample_method_input: str = "bilinear",
340
+ resample_method_output: str = "bilinear",
341
+ batch_size: int = 1,
342
+ ensembling_kwargs: Optional[Dict[str, Any]] = None,
343
+ latents: Optional[Union[torch.Tensor, List[torch.Tensor]]] = None,
344
+ prompt_embeds: Optional[torch.Tensor] = None,
345
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
346
+ num_images_per_prompt: Optional[int] = 1,
347
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
348
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
349
+ output_type: str = "np",
350
+ output_uncertainty: bool = False,
351
+ output_latent: bool = False,
352
+ skip_preprocess: bool = False,
353
+ return_dict: bool = True,
354
+ **kwargs,
355
+ ):
356
+ """
357
+ Function invoked when calling the pipeline.
358
+
359
+ Args:
360
+ image (`PIL.Image.Image`, `np.ndarray`, `torch.Tensor`, `List[PIL.Image.Image]`, `List[np.ndarray]`,
361
+ `List[torch.Tensor]`): An input image or images used as input for the normals estimation task. For
362
+ arrays and tensors, the expected value range is between `[0, 1]`. Passing a batch of images is possible
363
+ by providing a four-dimensional array or a tensor. Additionally, a list of images of two- or
364
+ three-dimensional arrays or tensors can be passed. In the latter case, all list elements must have the
365
+ same width and height.
366
+ num_inference_steps (`int`, *optional*, defaults to `None`):
367
+ Number of denoising diffusion steps during inference. The default value `None` results in automatic
368
+ selection. The number of steps should be at least 10 with the full Marigold models, and between 1 and 4
369
+ for Marigold-LCM models.
370
+ ensemble_size (`int`, defaults to `1`):
371
+ Number of ensemble predictions. Recommended values are 5 and higher for better precision, or 1 for
372
+ faster inference.
373
+ processing_resolution (`int`, *optional*, defaults to `None`):
374
+ Effective processing resolution. When set to `0`, matches the larger input image dimension. This
375
+ produces crisper predictions, but may also lead to the overall loss of global context. The default
376
+ value `None` resolves to the optimal value from the model config.
377
+ match_input_resolution (`bool`, *optional*, defaults to `True`):
378
+ When enabled, the output prediction is resized to match the input dimensions. When disabled, the longer
379
+ side of the output will be equal to `processing_resolution`.
380
+ resample_method_input (`str`, *optional*, defaults to `"bilinear"`):
381
+ Resampling method used to resize input images to `processing_resolution`. The accepted values are:
382
+ `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
383
+ resample_method_output (`str`, *optional*, defaults to `"bilinear"`):
384
+ Resampling method used to resize output predictions to match the input resolution. The accepted values
385
+ are `"nearest"`, `"nearest-exact"`, `"bilinear"`, `"bicubic"`, or `"area"`.
386
+ batch_size (`int`, *optional*, defaults to `1`):
387
+ Batch size; only matters when setting `ensemble_size` or passing a tensor of images.
388
+ ensembling_kwargs (`dict`, *optional*, defaults to `None`):
389
+ Extra dictionary with arguments for precise ensembling control. The following options are available:
390
+ - reduction (`str`, *optional*, defaults to `"closest"`): Defines the ensembling function applied in
391
+ every pixel location, can be either `"closest"` or `"mean"`.
392
+ latents (`torch.Tensor`, *optional*, defaults to `None`):
393
+ Latent noise tensors to replace the random initialization. These can be taken from the previous
394
+ function call's output.
395
+ generator (`torch.Generator`, or `List[torch.Generator]`, *optional*, defaults to `None`):
396
+ Random number generator object to ensure reproducibility.
397
+ output_type (`str`, *optional*, defaults to `"np"`):
398
+ Preferred format of the output's `prediction` and the optional `uncertainty` fields. The accepted
399
+ values are: `"np"` (numpy array) or `"pt"` (torch tensor).
400
+ output_uncertainty (`bool`, *optional*, defaults to `False`):
401
+ When enabled, the output's `uncertainty` field contains the predictive uncertainty map, provided that
402
+ the `ensemble_size` argument is set to a value greater than 1.
403
+ output_latent (`bool`, *optional*, defaults to `False`):
404
+ When enabled, the output's `latent` field contains the latent codes corresponding to the predictions
405
+ within the ensemble. These codes can be saved, modified, and used for subsequent calls with the
406
+ `latents` argument.
407
+ return_dict (`bool`, *optional*, defaults to `True`):
408
+ Whether or not to return a [`~pipelines.marigold.MarigoldNormalsOutput`] instead of a plain tuple.
409
+
410
+ Examples:
411
+
412
+ Returns:
413
+ [`~pipelines.marigold.MarigoldNormalsOutput`] or `tuple`:
414
+ If `return_dict` is `True`, [`~pipelines.marigold.MarigoldNormalsOutput`] is returned, otherwise a
415
+ `tuple` is returned where the first element is the prediction, the second element is the uncertainty
416
+ (or `None`), and the third is the latent (or `None`).
417
+ """
418
+
419
+ # 0. Resolving variables.
420
+ device = self._execution_device
421
+ dtype = self.dtype
422
+
423
+ # Model-specific optimal default values leading to fast and reasonable results.
424
+ if num_inference_steps is None:
425
+ num_inference_steps = self.default_denoising_steps
426
+ if processing_resolution is None:
427
+ processing_resolution = self.default_processing_resolution
428
+
429
+ # 1. Check inputs.
430
+ num_images = self.check_inputs(
431
+ image,
432
+ num_inference_steps,
433
+ ensemble_size,
434
+ processing_resolution,
435
+ resample_method_input,
436
+ resample_method_output,
437
+ batch_size,
438
+ ensembling_kwargs,
439
+ latents,
440
+ generator,
441
+ output_type,
442
+ output_uncertainty,
443
+ )
444
+
445
+
446
+ # 2. Prepare empty text conditioning.
447
+ # Model invocation: self.tokenizer, self.text_encoder.
448
+ if self.empty_text_embedding is None:
449
+ prompt = ""
450
+ text_inputs = self.tokenizer(
451
+ prompt,
452
+ padding="do_not_pad",
453
+ max_length=self.tokenizer.model_max_length,
454
+ truncation=True,
455
+ return_tensors="pt",
456
+ )
457
+ text_input_ids = text_inputs.input_ids.to(device)
458
+ self.empty_text_embedding = self.text_encoder(text_input_ids)[0] # [1,2,1024]
459
+
460
+
461
+
462
+ # 3. Prepare prompt embeddings.
463
+ if self.prompt_embeds is None:
464
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
465
+ self.prompt,
466
+ device,
467
+ num_images_per_prompt,
468
+ False,
469
+ negative_prompt,
470
+ prompt_embeds=prompt_embeds,
471
+ negative_prompt_embeds=None,
472
+ lora_scale=None,
473
+ clip_skip=None,
474
+ )
475
+ self.prompt_embeds = prompt_embeds
476
+ self.negative_prompt_embeds = negative_prompt_embeds
477
+
478
+
479
+
480
+ # 4. Preprocess input images. This function loads input image or images of compatible dimensions `(H, W)`,
481
+ # optionally downsamples them to the `processing_resolution` `(PH, PW)`, where
482
+ # `max(PH, PW) == processing_resolution`, and pads the dimensions to `(PPH, PPW)` such that these values are
483
+ # divisible by the latent space downscaling factor (typically 8 in Stable Diffusion). The default value `None`
484
+ # of `processing_resolution` resolves to the optimal value from the model config. It is a recommended mode of
485
+ # operation and leads to the most reasonable results. Using the native image resolution or any other processing
486
+ # resolution can lead to loss of either fine details or global context in the output predictions.
487
+ if not skip_preprocess:
488
+ image, padding, original_resolution = self.image_processor.preprocess(
489
+ image, processing_resolution, resample_method_input, device, dtype
490
+ ) # [N,3,PPH,PPW]
491
+ else:
492
+ padding = (0, 0)
493
+ original_resolution = image.shape[2:]
494
+ # 5. Encode input image into latent space. At this step, each of the `N` input images is represented with `E`
495
+ # ensemble members. Each ensemble member is an independent diffused prediction, just initialized independently.
496
+ # Latents of each such predictions across all input images and all ensemble members are represented in the
497
+ # `pred_latent` variable. The variable `image_latent` is of the same shape: it contains each input image encoded
498
+ # into latent space and replicated `E` times. The latents can be either generated (see `generator` to ensure
499
+ # reproducibility), or passed explicitly via the `latents` argument. The latter can be set outside the pipeline
500
+ # code. For example, in the Marigold-LCM video processing demo, the latents initialization of a frame is taken
501
+ # as a convex combination of the latents output of the pipeline for the previous frame and a newly-sampled
502
+ # noise. This behavior can be achieved by setting the `output_latent` argument to `True`. The latent space
503
+ # dimensions are `(h, w)`. Encoding into latent space happens in batches of size `batch_size`.
504
+ # Model invocation: self.vae.encoder.
505
+ image_latent, pred_latent = self.prepare_latents(
506
+ image, latents, generator, ensemble_size, batch_size
507
+ ) # [N*E,4,h,w], [N*E,4,h,w]
508
+
509
+ gaus_noise = pred_latent.detach().clone()
510
+ del image
511
+
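The comment above mentions initializing a video frame's latent as a convex combination of the previous frame's output latent and fresh noise; a self-contained sketch under assumed values (the 0.8 weight is arbitrary, and `prev_latent` stands in for a latent returned by an earlier call with `output_latent=True`):

```py
import torch

torch.manual_seed(0)
prev_latent = torch.randn(1, 4, 96, 96)   # stand-in for the previous frame's output latent
alpha = 0.8                               # assumed blending weight
next_init = alpha * prev_latent + (1 - alpha) * torch.randn_like(prev_latent)
# Pass `latents=next_init` on the next call to keep predictions temporally stable.
```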
512
+
513
+ # 6. Obtain ControlNet conditioning residuals.
514
+
515
+ cond_scale = controlnet_conditioning_scale
516
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
517
+ image_latent.detach(),
518
+ self.t_start,
519
+ encoder_hidden_states=self.prompt_embeds,
520
+ conditioning_scale=cond_scale,
521
+ guess_mode=False,
522
+ return_dict=False,
523
+ )
524
+
525
+ # 7. YOSO sampling
526
+ latent_x_t = self.unet(
527
+ pred_latent,
528
+ self.t_start,
529
+ encoder_hidden_states=self.prompt_embeds,
530
+ down_block_additional_residuals=down_block_res_samples,
531
+ mid_block_additional_residual=mid_block_res_sample,
532
+ return_dict=False,
533
+ )[0]
534
+
535
+
536
+ del (
537
+ pred_latent,
538
+ image_latent,
539
+ )
540
+
541
+ # 8. Decode prediction.
542
+ prediction = self.decode_prediction(latent_x_t)
543
+ prediction = self.image_processor.unpad_image(prediction, padding) # [N*E,3,PH,PW]
544
+
545
+ prediction = self.image_processor.resize_antialias(
546
+ prediction, original_resolution, resample_method_output, is_aa=False
547
+ ) # [N,3,H,W]
548
+ prediction = self.normalize_normals(prediction) # [N,3,H,W]
549
+
550
+ if output_type == "np":
551
+ prediction = self.image_processor.pt_to_numpy(prediction) # [N,H,W,3]
552
+
553
+ # 9. Offload all models
554
+ self.maybe_free_model_hooks()
555
+
556
+ return YosoNormalsOutput(
557
+ prediction=prediction,
558
+ latent=latent_x_t,
559
+ gaus_noise=gaus_noise,
560
+ )
561
+
562
+ # Copied from diffusers.pipelines.marigold.pipeline_marigold_depth.MarigoldDepthPipeline.prepare_latents
563
+ def prepare_latents(
564
+ self,
565
+ image: torch.Tensor,
566
+ latents: Optional[torch.Tensor],
567
+ generator: Optional[torch.Generator],
568
+ ensemble_size: int,
569
+ batch_size: int,
570
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
571
+ def retrieve_latents(encoder_output):
572
+ if hasattr(encoder_output, "latent_dist"):
573
+ return encoder_output.latent_dist.mode()
574
+ elif hasattr(encoder_output, "latents"):
575
+ return encoder_output.latents
576
+ else:
577
+ raise AttributeError("Could not access latents of provided encoder_output")
578
+
579
+
580
+
581
+ image_latent = torch.cat(
582
+ [
583
+ retrieve_latents(self.vae.encode(image[i : i + batch_size]))
584
+ for i in range(0, image.shape[0], batch_size)
585
+ ],
586
+ dim=0,
587
+ ) # [N,4,h,w]
588
+ image_latent = image_latent * self.vae.config.scaling_factor
589
+ image_latent = image_latent.repeat_interleave(ensemble_size, dim=0) # [N*E,4,h,w]
590
+
591
+ pred_latent = latents
592
+ if pred_latent is None:
593
+ pred_latent = randn_tensor(
594
+ image_latent.shape,
595
+ generator=generator,
596
+ device=image_latent.device,
597
+ dtype=image_latent.dtype,
598
+ ) # [N*E,4,h,w]
599
+
600
+ return image_latent, pred_latent
601
+
602
+ def decode_prediction(self, pred_latent: torch.Tensor) -> torch.Tensor:
603
+ if pred_latent.dim() != 4 or pred_latent.shape[1] != self.vae.config.latent_channels:
604
+ raise ValueError(
605
+ f"Expecting 4D tensor of shape [B,{self.vae.config.latent_channels},H,W]; got {pred_latent.shape}."
606
+ )
607
+
608
+ prediction = self.vae.decode(pred_latent / self.vae.config.scaling_factor, return_dict=False)[0] # [B,3,H,W]
609
+
610
+ prediction = self.normalize_normals(prediction) # [B,3,H,W]
611
+
612
+ return prediction # [B,3,H,W]
613
+
614
+ @staticmethod
615
+ def normalize_normals(normals: torch.Tensor, eps: float = 1e-6) -> torch.Tensor:
616
+ if normals.dim() != 4 or normals.shape[1] != 3:
617
+ raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
618
+
619
+ norm = torch.norm(normals, dim=1, keepdim=True)
620
+ normals /= norm.clamp(min=eps)
621
+
622
+ return normals
623
+
624
+ @staticmethod
625
+ def ensemble_normals(
626
+ normals: torch.Tensor, output_uncertainty: bool, reduction: str = "closest"
627
+ ) -> Tuple[torch.Tensor, Optional[torch.Tensor]]:
628
+ """
629
+ Ensembles the normals maps represented by the `normals` tensor with expected shape `(B, 3, H, W)`, where B is
630
+ the number of ensemble members for a given prediction of size `(H x W)`.
631
+
632
+ Args:
633
+ normals (`torch.Tensor`):
634
+ Input ensemble normals maps.
635
+ output_uncertainty (`bool`, *optional*, defaults to `False`):
636
+ Whether to output uncertainty map.
637
+ reduction (`str`, *optional*, defaults to `"closest"`):
638
+ Reduction method used to ensemble aligned predictions. The accepted values are: `"closest"` and
639
+ `"mean"`.
640
+
641
+ Returns:
642
+ A tensor of aligned and ensembled normals maps with shape `(1, 3, H, W)` and optionally a tensor of
643
+ uncertainties of shape `(1, 1, H, W)`.
644
+ """
645
+ if normals.dim() != 4 or normals.shape[1] != 3:
646
+ raise ValueError(f"Expecting 4D tensor of shape [B,3,H,W]; got {normals.shape}.")
647
+ if reduction not in ("closest", "mean"):
648
+ raise ValueError(f"Unrecognized reduction method: {reduction}.")
649
+
650
+ mean_normals = normals.mean(dim=0, keepdim=True) # [1,3,H,W]
651
+ mean_normals = MarigoldNormalsPipeline.normalize_normals(mean_normals) # [1,3,H,W]
652
+
653
+ sim_cos = (mean_normals * normals).sum(dim=1, keepdim=True) # [E,1,H,W]
654
+ sim_cos = sim_cos.clamp(-1, 1) # required to avoid NaN in uncertainty with fp16
655
+
656
+ uncertainty = None
657
+ if output_uncertainty:
658
+ uncertainty = sim_cos.arccos() # [E,1,H,W]
659
+ uncertainty = uncertainty.mean(dim=0, keepdim=True) / np.pi # [1,1,H,W]
660
+
661
+ if reduction == "mean":
662
+ return mean_normals, uncertainty # [1,3,H,W], [1,1,H,W]
663
+
664
+ closest_indices = sim_cos.argmax(dim=0, keepdim=True) # [1,1,H,W]
665
+ closest_indices = closest_indices.repeat(1, 3, 1, 1) # [1,3,H,W]
666
+ closest_normals = torch.gather(normals, 0, closest_indices) # [1,3,H,W]
667
+
668
+ return closest_normals, uncertainty # [1,3,H,W], [1,1,H,W]
669
+
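As a sanity check on `ensemble_normals`, a self-contained toy sketch of the `closest` reduction and the uncertainty map (random ensemble; shapes mirror the `[E,3,H,W]` inputs above):

```py
import math
import torch
import torch.nn.functional as F

torch.manual_seed(0)
E, H, W = 4, 8, 8
normals = F.normalize(torch.randn(E, 3, H, W), dim=1)           # toy unit-norm ensemble

mean = F.normalize(normals.mean(dim=0, keepdim=True), dim=1)    # [1,3,H,W]
sim = (mean * normals).sum(dim=1, keepdim=True).clamp(-1, 1)    # cosine to mean, [E,1,H,W]
uncertainty = sim.arccos().mean(dim=0, keepdim=True) / math.pi  # [1,1,H,W]
idx = sim.argmax(dim=0, keepdim=True).repeat(1, 3, 1, 1)        # per-pixel closest member
closest = torch.gather(normals, 0, idx)                         # [1,3,H,W]
```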
670
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
671
+ def retrieve_timesteps(
672
+ scheduler,
673
+ num_inference_steps: Optional[int] = None,
674
+ device: Optional[Union[str, torch.device]] = None,
675
+ timesteps: Optional[List[int]] = None,
676
+ sigmas: Optional[List[float]] = None,
677
+ **kwargs,
678
+ ):
679
+ """
680
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
681
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
682
+
683
+ Args:
684
+ scheduler (`SchedulerMixin`):
685
+ The scheduler to get timesteps from.
686
+ num_inference_steps (`int`):
687
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
688
+ must be `None`.
689
+ device (`str` or `torch.device`, *optional*):
690
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
691
+ timesteps (`List[int]`, *optional*):
692
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
693
+ `num_inference_steps` and `sigmas` must be `None`.
694
+ sigmas (`List[float]`, *optional*):
695
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
696
+ `num_inference_steps` and `timesteps` must be `None`.
697
+
698
+ Returns:
699
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
700
+ second element is the number of inference steps.
701
+ """
702
+ if timesteps is not None and sigmas is not None:
703
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
704
+ if timesteps is not None:
705
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
706
+ if not accepts_timesteps:
707
+ raise ValueError(
708
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
709
+ f" timestep schedules. Please check whether you are using the correct scheduler."
710
+ )
711
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
712
+ timesteps = scheduler.timesteps
713
+ num_inference_steps = len(timesteps)
714
+ elif sigmas is not None:
715
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
716
+ if not accept_sigmas:
717
+ raise ValueError(
718
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
719
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
720
+ )
721
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
722
+ timesteps = scheduler.timesteps
723
+ num_inference_steps = len(timesteps)
724
+ else:
725
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
726
+ timesteps = scheduler.timesteps
727
+ return timesteps, num_inference_steps
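For orientation, a minimal usage sketch of the one-step pipeline defined in this file; the checkpoint path is hypothetical, and the test image URL is reused from the ControlNet docstring example further below:

```py
import torch
from diffusers.utils import load_image
from stablenormal.pipeline_yoso_normal import YOSONormalsPipeline

# Hypothetical local checkpoint directory; substitute the released weights.
pipe = YOSONormalsPipeline.from_pretrained("weights/yoso-normal", torch_dtype=torch.float16)
pipe.to("cuda")

image = load_image(
    "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
)
out = pipe(image, processing_resolution=768, output_type="np")
normals = out.prediction  # [N,H,W,3], unit-length normal vectors
```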
stablenormal/scheduler/__init__.py ADDED
File without changes
stablenormal/scheduler/heuristics_ddimsampler.py ADDED
@@ -0,0 +1,243 @@
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import List, Optional, Tuple, Union
4
+
5
+ import numpy as np
6
+ import torch
7
+ from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput, DDIMScheduler
8
+ from diffusers.schedulers.scheduling_utils import SchedulerMixin
9
+ from diffusers.configuration_utils import register_to_config, ConfigMixin
10
+ from diffusers.utils.torch_utils import randn_tensor
11
+
12
+
13
+ class HEURI_DDIMScheduler(DDIMScheduler, SchedulerMixin, ConfigMixin):
14
+
15
+ def set_timesteps(self, num_inference_steps: int, t_start: int, device: Union[str, torch.device] = None):
16
+ """
17
+ Sets the discrete timesteps used for the diffusion chain (to be run before inference).
18
+
19
+ Args:
20
+ num_inference_steps (`int`):
21
+ The number of diffusion steps used when generating samples with a pre-trained model.
22
+ """
23
+
24
+ if num_inference_steps > self.config.num_train_timesteps:
25
+ raise ValueError(
26
+ f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.train_timesteps`:"
27
+ f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
28
+ f" maximal {self.config.num_train_timesteps} timesteps."
29
+ )
30
+
31
+ self.num_inference_steps = num_inference_steps
32
+
33
+ # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891
34
+ if self.config.timestep_spacing == "linspace":
35
+ timesteps = (
36
+ np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps)
37
+ .round()[::-1]
38
+ .copy()
39
+ .astype(np.int64)
40
+ )
41
+ elif self.config.timestep_spacing == "leading":
42
+ step_ratio = self.config.num_train_timesteps // self.num_inference_steps
43
+ # creates integer timesteps by multiplying by ratio
44
+ # casting to int to avoid issues when num_inference_step is power of 3
45
+ timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
46
+ timesteps += self.config.steps_offset
47
+ elif self.config.timestep_spacing == "trailing":
48
+ step_ratio = self.config.num_train_timesteps / self.num_inference_steps
49
+ # creates integer timesteps by multiplying by ratio
50
+ # casting to int to avoid issues when num_inference_step is power of 3
51
+ timesteps = np.round(np.arange(self.config.num_train_timesteps, 0, -step_ratio)).astype(np.int64)
52
+ timesteps -= 1
53
+ else:
54
+ raise ValueError(
55
+ f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'leading' or 'trailing'."
56
+ )
57
+
58
+ timesteps = torch.from_numpy(timesteps).to(device)
59
+
60
+
61
+ naive_sampling_step = num_inference_steps // 2
62
+
63
+ # Debugging aid: set naive_sampling_step = 0 to disable the heuristic half-schedule.
64
+ # naive_sampling_step = 0
65
+
66
+ self.naive_sampling_step = naive_sampling_step
67
+
68
+ timesteps[:naive_sampling_step] = timesteps[naive_sampling_step]  # hold the mid-schedule timestep for the first half (refinement), then denoise from there
69
+
70
+ timesteps = [timestep + 1 for timestep in timesteps]
71
+
72
+ self.timesteps = timesteps
73
+ self.gap = self.config.num_train_timesteps // self.num_inference_steps
74
+ self.prev_timesteps = [timestep for timestep in self.timesteps[1:]]
75
+ self.prev_timesteps.append(torch.zeros_like(self.prev_timesteps[-1]))
76
+
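To make the heuristic concrete, a small numpy sketch of the schedule this produces under assumed config values (1000 training timesteps, `"leading"` spacing, `steps_offset=0`):

```py
import numpy as np

num_train_timesteps, num_inference_steps = 1000, 10
step_ratio = num_train_timesteps // num_inference_steps
timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(np.int64)
naive = num_inference_steps // 2
timesteps[:naive] = timesteps[naive]  # hold the mid-schedule step for the first half
timesteps = timesteps + 1
print(timesteps.tolist())  # [401, 401, 401, 401, 401, 401, 301, 201, 101, 1]
```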
77
+ def step(
78
+ self,
79
+ model_output: torch.Tensor,
80
+ timestep: int,
81
+ prev_timestep: int,
82
+ sample: torch.Tensor,
83
+ eta: float = 0.0,
84
+ use_clipped_model_output: bool = False,
85
+ generator=None,
86
+ cur_step=None,
87
+ variance_noise: Optional[torch.Tensor] = None,
88
+ gaus_noise: Optional[torch.Tensor] = None,
89
+ return_dict: bool = True,
90
+ ) -> Union[DDIMSchedulerOutput, Tuple]:
91
+ """
92
+ Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
93
+ process from the learned model outputs (most often the predicted noise).
94
+
95
+ Args:
96
+ model_output (`torch.Tensor`):
97
+ The direct output from learned diffusion model.
98
+ timestep (`float`):
99
+ The current discrete timestep in the diffusion chain.
100
+ prev_timestep (`float`):
101
+ The timestep to step to (the next timestep along the sampling trajectory).
102
+ sample (`torch.Tensor`):
103
+ A current instance of a sample created by the diffusion process.
104
+ eta (`float`):
105
+ The weight of noise for added noise in diffusion step.
106
+ use_clipped_model_output (`bool`, defaults to `False`):
107
+ If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
108
+ because predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`. If no
109
+ clipping has happened, "corrected" `model_output` would coincide with the one provided as input and
110
+ `use_clipped_model_output` has no effect.
111
+ generator (`torch.Generator`, *optional*):
112
+ A random number generator.
113
+ variance_noise (`torch.Tensor`):
114
+ Alternative to generating noise with `generator` by directly providing the noise for the variance
115
+ itself. Useful for methods such as [`CycleDiffusion`].
116
+ return_dict (`bool`, *optional*, defaults to `True`):
117
+ Whether or not to return a [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] or `tuple`.
118
+
119
+ Returns:
120
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
121
+ If return_dict is `True`, [`~schedulers.scheduling_ddim.DDIMSchedulerOutput`] is returned, otherwise a
122
+ tuple is returned where the first element is the sample tensor.
123
+
124
+ """
125
+ if self.num_inference_steps is None:
126
+ raise ValueError(
127
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
128
+ )
129
+
130
+ # See formulas (12) and (16) of DDIM paper https://arxiv.org/pdf/2010.02502.pdf
131
+ # Ideally, read DDIM paper in-detail understanding
132
+
133
+ # Notation (<variable name> -> <name in paper>
134
+ # - pred_noise_t -> e_theta(x_t, t)
135
+ # - pred_original_sample -> f_theta(x_t, t) or x_0
136
+ # - std_dev_t -> sigma_t
137
+ # - eta -> η
138
+ # - pred_sample_direction -> "direction pointing to x_t"
139
+ # - pred_prev_sample -> "x_t-1"
140
+
141
+ # 1. get previous step value (=t-1)
142
+
143
+ # trick from heuri_sampling
144
+ if cur_step == self.naive_sampling_step and timestep == prev_timestep:
145
+ timestep += self.gap
146
+
147
+
148
+ # NOTE: naive sampling uses `prev_timestep` exactly as passed in.
149
+
150
+ # 2. compute alphas, betas
151
+ alpha_prod_t = self.alphas_cumprod[timestep]
152
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
153
+
154
+ beta_prod_t = 1 - alpha_prod_t
155
+
156
+ # 3. compute predicted original sample from predicted noise also called
157
+ # "predicted x_0" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
158
+ if self.config.prediction_type == "epsilon":
159
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
160
+ pred_epsilon = model_output
161
+ elif self.config.prediction_type == "sample":
162
+ pred_original_sample = model_output
163
+ pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
164
+ elif self.config.prediction_type == "v_prediction":
165
+ pred_original_sample = (alpha_prod_t**0.5) * sample - (beta_prod_t**0.5) * model_output
166
+ pred_epsilon = (alpha_prod_t**0.5) * model_output + (beta_prod_t**0.5) * sample
167
+ else:
168
+ raise ValueError(
169
+ f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, `sample`, or"
170
+ " `v_prediction`"
171
+ )
172
+
173
+ # 4. Clip or threshold "predicted x_0"
174
+ if self.config.thresholding:
175
+ pred_original_sample = self._threshold_sample(pred_original_sample)
176
+
177
+ # 5. compute variance: "sigma_t(η)" -> see formula (16)
178
+ # σ_t = sqrt((1 − α_{t-1})/(1 − α_t)) * sqrt(1 − α_t/α_{t-1})
179
+ variance = self._get_variance(timestep, prev_timestep)
180
+ std_dev_t = eta * variance ** (0.5)
181
+
182
+
183
+ if use_clipped_model_output:
184
+ # the pred_epsilon is always re-derived from the clipped x_0 in Glide
185
+ pred_epsilon = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
186
+
187
+ # 6. compute "direction pointing to x_t" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
188
+ pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * pred_epsilon
189
+
190
+ # 7. compute x_t without "random noise" of formula (12) from https://arxiv.org/pdf/2010.02502.pdf
191
+ prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
192
+
193
+ if eta > 0:
194
+ if variance_noise is not None and generator is not None:
195
+ raise ValueError(
196
+ "Cannot pass both generator and variance_noise. Please make sure that either `generator` or"
197
+ " `variance_noise` stays `None`."
198
+ )
199
+
200
+ if variance_noise is None:
201
+ variance_noise = randn_tensor(
202
+ model_output.shape, generator=generator, device=model_output.device, dtype=model_output.dtype
203
+ )
204
+ variance = std_dev_t * variance_noise
205
+
206
+ prev_sample = prev_sample + variance
207
+
208
+ if cur_step < self.naive_sampling_step:
209
+ prev_sample = self.add_noise(pred_original_sample, torch.randn_like(pred_original_sample), timestep)
210
+
211
+ if not return_dict:
212
+ return (prev_sample,)
213
+
214
+
215
+ return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
216
+
217
+
218
+
219
+ def add_noise(
220
+ self,
221
+ original_samples: torch.Tensor,
222
+ noise: torch.Tensor,
223
+ timesteps: torch.IntTensor,
224
+ ) -> torch.Tensor:
225
+ # Make sure alphas_cumprod and timestep have same device and dtype as original_samples
226
+ # Move the self.alphas_cumprod to device to avoid redundant CPU to GPU data movement
227
+ # for the subsequent add_noise calls
228
+ self.alphas_cumprod = self.alphas_cumprod.to(device=original_samples.device)
229
+ alphas_cumprod = self.alphas_cumprod.to(dtype=original_samples.dtype)
230
+ timesteps = timesteps.to(original_samples.device)
231
+
232
+ sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
233
+ sqrt_alpha_prod = sqrt_alpha_prod.flatten()
234
+ while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
235
+ sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)
236
+
237
+ sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
238
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
239
+ while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
240
+ sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)
241
+
242
+ noisy_samples = sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
243
+ return noisy_samples
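A minimal sketch of the broadcast pattern `add_noise` relies on, with a toy cumulative-alpha table (values assumed for illustration):

```py
import torch

alphas_cumprod = torch.linspace(0.9999, 0.001, 1000)  # toy schedule
samples = torch.randn(2, 4, 8, 8)
noise = torch.randn_like(samples)
t = torch.tensor([10, 500])

sqrt_a = alphas_cumprod[t].sqrt().flatten()
while sqrt_a.dim() < samples.dim():
    sqrt_a = sqrt_a.unsqueeze(-1)                      # [2] -> [2,1,1,1]
sqrt_1ma = (1 - alphas_cumprod[t]).sqrt().view_as(sqrt_a)
noisy = sqrt_a * samples + sqrt_1ma * noise            # forward process q(x_t | x_0)
```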
stablenormal/stablecontrolnet.py ADDED
@@ -0,0 +1,1354 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+
16
+ import inspect
17
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
18
+
19
+ import numpy as np
20
+ import PIL.Image
21
+ import torch
22
+ import torch.nn.functional as F
23
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
24
+
25
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
26
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
27
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
+ from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel
29
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
30
+ from diffusers.schedulers import KarrasDiffusionSchedulers
31
+ from diffusers.utils import (
32
+ USE_PEFT_BACKEND,
33
+ deprecate,
34
+ logging,
35
+ replace_example_docstring,
36
+ scale_lora_layers,
37
+ unscale_lora_layers,
38
+ )
39
+ from diffusers.utils.torch_utils import is_compiled_module, is_torch_version, randn_tensor
40
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
41
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
42
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
43
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
44
+
45
+
46
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
47
+
48
+
49
+ EXAMPLE_DOC_STRING = """
50
+ Examples:
51
+ ```py
52
+ >>> # !pip install opencv-python transformers accelerate
53
+ >>> from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCMultistepScheduler
54
+ >>> from diffusers.utils import load_image
55
+ >>> import numpy as np
56
+ >>> import torch
57
+
58
+ >>> import cv2
59
+ >>> from PIL import Image
60
+
61
+ >>> # download an image
62
+ >>> image = load_image(
63
+ ... "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png"
64
+ ... )
65
+ >>> image = np.array(image)
66
+
67
+ >>> # get canny image
68
+ >>> image = cv2.Canny(image, 100, 200)
69
+ >>> image = image[:, :, None]
70
+ >>> image = np.concatenate([image, image, image], axis=2)
71
+ >>> canny_image = Image.fromarray(image)
72
+
73
+ >>> # load control net and stable diffusion v1-5
74
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
75
+ >>> pipe = StableDiffusionControlNetPipeline.from_pretrained(
76
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
77
+ ... )
78
+
79
+ >>> # speed up diffusion process with faster scheduler and memory optimization
80
+ >>> pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)
81
+ >>> # remove following line if xformers is not installed
82
+ >>> pipe.enable_xformers_memory_efficient_attention()
83
+
84
+ >>> pipe.enable_model_cpu_offload()
85
+
86
+ >>> # generate image
87
+ >>> generator = torch.manual_seed(0)
88
+ >>> image = pipe(
89
+ ... "futuristic-looking woman", num_inference_steps=20, generator=generator, image=canny_image
90
+ ... ).images[0]
91
+ ```
92
+ """
93
+
94
+
95
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
96
+ def retrieve_timesteps(
97
+ scheduler,
98
+ num_inference_steps: Optional[int] = None,
99
+ device: Optional[Union[str, torch.device]] = None,
100
+ timesteps: Optional[List[int]] = None,
101
+ sigmas: Optional[List[float]] = None,
102
+ **kwargs,
103
+ ):
104
+ """
105
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
106
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
107
+
108
+ Args:
109
+ scheduler (`SchedulerMixin`):
110
+ The scheduler to get timesteps from.
111
+ num_inference_steps (`int`):
112
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
113
+ must be `None`.
114
+ device (`str` or `torch.device`, *optional*):
115
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
116
+ timesteps (`List[int]`, *optional*):
117
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
118
+ `num_inference_steps` and `sigmas` must be `None`.
119
+ sigmas (`List[float]`, *optional*):
120
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
121
+ `num_inference_steps` and `timesteps` must be `None`.
122
+
123
+ Returns:
124
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
125
+ second element is the number of inference steps.
126
+ """
127
+ if timesteps is not None and sigmas is not None:
128
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
129
+ if timesteps is not None:
130
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
131
+ if not accepts_timesteps:
132
+ raise ValueError(
133
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
134
+ f" timestep schedules. Please check whether you are using the correct scheduler."
135
+ )
136
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
137
+ timesteps = scheduler.timesteps
138
+ num_inference_steps = len(timesteps)
139
+ elif sigmas is not None:
140
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
141
+ if not accept_sigmas:
142
+ raise ValueError(
143
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
144
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
145
+ )
146
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
147
+ timesteps = scheduler.timesteps
148
+ num_inference_steps = len(timesteps)
149
+ else:
150
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
151
+ timesteps = scheduler.timesteps
152
+ return timesteps, num_inference_steps
153
+
154
+
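For orientation, the plain path through `retrieve_timesteps` (a sketch; custom `timesteps`/`sigmas` raise for schedulers whose `set_timesteps` does not accept them):

```py
from diffusers import DDIMScheduler

sched = DDIMScheduler(num_train_timesteps=1000)
ts, n = retrieve_timesteps(sched, num_inference_steps=10)
assert n == 10 and len(ts) == 10  # schedule forwarded from scheduler.set_timesteps
```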
155
+ class StableDiffusionControlNetPipeline(
156
+ DiffusionPipeline,
157
+ StableDiffusionMixin,
158
+ TextualInversionLoaderMixin,
159
+ LoraLoaderMixin,
160
+ IPAdapterMixin,
161
+ FromSingleFileMixin,
162
+ ):
163
+ r"""
164
+ Pipeline for text-to-image generation using Stable Diffusion with ControlNet guidance.
165
+
166
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
167
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
168
+
169
+ The pipeline also inherits the following loading methods:
170
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
171
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
172
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
173
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
174
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
175
+
176
+ Args:
177
+ vae ([`AutoencoderKL`]):
178
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
179
+ text_encoder ([`~transformers.CLIPTextModel`]):
180
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
181
+ tokenizer ([`~transformers.CLIPTokenizer`]):
182
+ A `CLIPTokenizer` to tokenize text.
183
+ unet ([`UNet2DConditionModel`]):
184
+ A `UNet2DConditionModel` to denoise the encoded image latents.
185
+ controlnet ([`ControlNetModel`] or `List[ControlNetModel]`):
186
+ Provides additional conditioning to the `unet` during the denoising process. If you set multiple
187
+ ControlNets as a list, the outputs from each ControlNet are added together to create one combined
188
+ additional conditioning.
189
+ scheduler ([`SchedulerMixin`]):
190
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
191
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
192
+ safety_checker ([`StableDiffusionSafetyChecker`]):
193
+ Classification module that estimates whether generated images could be considered offensive or harmful.
194
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
195
+ about a model's potential harms.
196
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
197
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
198
+ """
199
+
200
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
201
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
202
+ _exclude_from_cpu_offload = ["safety_checker"]
203
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
204
+
205
+ def __init__(
206
+ self,
207
+ vae: AutoencoderKL,
208
+ text_encoder: CLIPTextModel,
209
+ tokenizer: CLIPTokenizer,
210
+ unet: UNet2DConditionModel,
211
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
212
+ scheduler: KarrasDiffusionSchedulers,
213
+ safety_checker: StableDiffusionSafetyChecker,
214
+ feature_extractor: CLIPImageProcessor,
215
+ image_encoder: CLIPVisionModelWithProjection = None,
216
+ requires_safety_checker: bool = True,
217
+ ):
218
+ super().__init__()
219
+
220
+ if safety_checker is None and requires_safety_checker:
221
+ logger.warning(
222
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
223
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
224
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
225
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
226
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
227
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
228
+ )
229
+
230
+ if safety_checker is not None and feature_extractor is None:
231
+ raise ValueError(
232
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
233
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
234
+ )
235
+
236
+ if isinstance(controlnet, (list, tuple)):
237
+ controlnet = MultiControlNetModel(controlnet)
238
+
239
+ self.register_modules(
240
+ vae=vae,
241
+ text_encoder=text_encoder,
242
+ tokenizer=tokenizer,
243
+ unet=unet,
244
+ controlnet=controlnet,
245
+ scheduler=scheduler,
246
+ safety_checker=safety_checker,
247
+ feature_extractor=feature_extractor,
248
+ image_encoder=image_encoder,
249
+ )
250
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
251
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True)
252
+ self.control_image_processor = VaeImageProcessor(
253
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
254
+ )
255
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
256
+
257
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
258
+ def _encode_prompt(
259
+ self,
260
+ prompt,
261
+ device,
262
+ num_images_per_prompt,
263
+ do_classifier_free_guidance,
264
+ negative_prompt=None,
265
+ prompt_embeds: Optional[torch.Tensor] = None,
266
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
267
+ lora_scale: Optional[float] = None,
268
+ **kwargs,
269
+ ):
270
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
271
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
272
+
273
+ prompt_embeds_tuple = self.encode_prompt(
274
+ prompt=prompt,
275
+ device=device,
276
+ num_images_per_prompt=num_images_per_prompt,
277
+ do_classifier_free_guidance=do_classifier_free_guidance,
278
+ negative_prompt=negative_prompt,
279
+ prompt_embeds=prompt_embeds,
280
+ negative_prompt_embeds=negative_prompt_embeds,
281
+ lora_scale=lora_scale,
282
+ **kwargs,
283
+ )
284
+
285
+ # concatenate for backwards comp
286
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
287
+
288
+ return prompt_embeds
289
+
290
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt
291
+ def encode_prompt(
292
+ self,
293
+ prompt,
294
+ device,
295
+ num_images_per_prompt,
296
+ do_classifier_free_guidance,
297
+ negative_prompt=None,
298
+ prompt_embeds: Optional[torch.Tensor] = None,
299
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
300
+ lora_scale: Optional[float] = None,
301
+ clip_skip: Optional[int] = None,
302
+ ):
303
+ r"""
304
+ Encodes the prompt into text encoder hidden states.
305
+
306
+ Args:
307
+ prompt (`str` or `List[str]`, *optional*):
308
+ prompt to be encoded
309
+ device: (`torch.device`):
310
+ torch device
311
+ num_images_per_prompt (`int`):
312
+ number of images that should be generated per prompt
313
+ do_classifier_free_guidance (`bool`):
314
+ whether to use classifier free guidance or not
315
+ negative_prompt (`str` or `List[str]`, *optional*):
316
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
317
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
318
+ less than `1`).
319
+ prompt_embeds (`torch.Tensor`, *optional*):
320
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
321
+ provided, text embeddings will be generated from `prompt` input argument.
322
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
323
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
324
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
325
+ argument.
326
+ lora_scale (`float`, *optional*):
327
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
328
+ clip_skip (`int`, *optional*):
329
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
330
+ the output of the pre-final layer will be used for computing the prompt embeddings.
331
+ """
332
+ # set lora scale so that monkey patched LoRA
333
+ # function of text encoder can correctly access it
334
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
335
+ self._lora_scale = lora_scale
336
+
337
+ # dynamically adjust the LoRA scale
338
+ if not USE_PEFT_BACKEND:
339
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
340
+ else:
341
+ scale_lora_layers(self.text_encoder, lora_scale)
342
+
343
+ if prompt is not None and isinstance(prompt, str):
344
+ batch_size = 1
345
+ elif prompt is not None and isinstance(prompt, list):
346
+ batch_size = len(prompt)
347
+ else:
348
+ batch_size = prompt_embeds.shape[0]
349
+
350
+ if prompt_embeds is None:
351
+ # textual inversion: process multi-vector tokens if necessary
352
+ if isinstance(self, TextualInversionLoaderMixin):
353
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
354
+
355
+ text_inputs = self.tokenizer(
356
+ prompt,
357
+ padding="max_length",
358
+ max_length=self.tokenizer.model_max_length,
359
+ truncation=True,
360
+ return_tensors="pt",
361
+ )
362
+ text_input_ids = text_inputs.input_ids
363
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
364
+
365
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
366
+ text_input_ids, untruncated_ids
367
+ ):
368
+ removed_text = self.tokenizer.batch_decode(
369
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
370
+ )
371
+ logger.warning(
372
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
373
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
374
+ )
375
+
376
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
377
+ attention_mask = text_inputs.attention_mask.to(device)
378
+ else:
379
+ attention_mask = None
380
+
381
+ if clip_skip is None:
382
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
383
+ prompt_embeds = prompt_embeds[0]
384
+ else:
385
+ prompt_embeds = self.text_encoder(
386
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
387
+ )
388
+ # Access the `hidden_states` first, that contains a tuple of
389
+ # all the hidden states from the encoder layers. Then index into
390
+ # the tuple to access the hidden states from the desired layer.
391
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
392
+ # We also need to apply the final LayerNorm here to not mess with the
393
+ # representations. The `last_hidden_states` that we typically use for
394
+ # obtaining the final prompt representations passes through the LayerNorm
395
+ # layer.
396
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
397
+
398
+ if self.text_encoder is not None:
399
+ prompt_embeds_dtype = self.text_encoder.dtype
400
+ elif self.unet is not None:
401
+ prompt_embeds_dtype = self.unet.dtype
402
+ else:
403
+ prompt_embeds_dtype = prompt_embeds.dtype
404
+
405
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
406
+
407
+ bs_embed, seq_len, _ = prompt_embeds.shape
408
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
409
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
410
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
411
+
412
+ # get unconditional embeddings for classifier free guidance
413
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
414
+ uncond_tokens: List[str]
415
+ if negative_prompt is None:
416
+ uncond_tokens = [""] * batch_size
417
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
418
+ raise TypeError(
419
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
420
+ f" {type(prompt)}."
421
+ )
422
+ elif isinstance(negative_prompt, str):
423
+ uncond_tokens = [negative_prompt]
424
+ elif batch_size != len(negative_prompt):
425
+ raise ValueError(
426
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
427
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
428
+ " the batch size of `prompt`."
429
+ )
430
+ else:
431
+ uncond_tokens = negative_prompt
432
+
433
+ # textual inversion: process multi-vector tokens if necessary
434
+ if isinstance(self, TextualInversionLoaderMixin):
435
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
436
+
437
+ max_length = prompt_embeds.shape[1]
438
+ uncond_input = self.tokenizer(
439
+ uncond_tokens,
440
+ padding="max_length",
441
+ max_length=max_length,
442
+ truncation=True,
443
+ return_tensors="pt",
444
+ )
445
+
446
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
447
+ attention_mask = uncond_input.attention_mask.to(device)
448
+ else:
449
+ attention_mask = None
450
+
451
+ negative_prompt_embeds = self.text_encoder(
452
+ uncond_input.input_ids.to(device),
453
+ attention_mask=attention_mask,
454
+ )
455
+ negative_prompt_embeds = negative_prompt_embeds[0]
456
+
457
+ if do_classifier_free_guidance:
458
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
459
+ seq_len = negative_prompt_embeds.shape[1]
460
+
461
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
462
+
463
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
464
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
465
+
466
+ if self.text_encoder is not None:
467
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
468
+ # Retrieve the original scale by scaling back the LoRA layers
469
+ unscale_lora_layers(self.text_encoder, lora_scale)
470
+
471
+ return prompt_embeds, negative_prompt_embeds
472
+
473
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
+     def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+         dtype = next(self.image_encoder.parameters()).dtype
+
+         if not isinstance(image, torch.Tensor):
+             image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+         image = image.to(device=device, dtype=dtype)
+         if output_hidden_states:
+             image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+             image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_enc_hidden_states = self.image_encoder(
+                 torch.zeros_like(image), output_hidden_states=True
+             ).hidden_states[-2]
+             uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
+                 num_images_per_prompt, dim=0
+             )
+             return image_enc_hidden_states, uncond_image_enc_hidden_states
+         else:
+             image_embeds = self.image_encoder(image).image_embeds
+             image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+             uncond_image_embeds = torch.zeros_like(image_embeds)
+
+             return image_embeds, uncond_image_embeds
+
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
+     def prepare_ip_adapter_image_embeds(
+         self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
+     ):
+         if ip_adapter_image_embeds is None:
+             if not isinstance(ip_adapter_image, list):
+                 ip_adapter_image = [ip_adapter_image]
+
+             if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
+                 raise ValueError(
+                     f"`ip_adapter_image` must have the same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
+                 )
+
+             image_embeds = []
+             for single_ip_adapter_image, image_proj_layer in zip(
+                 ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
+             ):
+                 output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
+                 single_image_embeds, single_negative_image_embeds = self.encode_image(
+                     single_ip_adapter_image, device, 1, output_hidden_state
+                 )
+                 single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
+                 single_negative_image_embeds = torch.stack(
+                     [single_negative_image_embeds] * num_images_per_prompt, dim=0
+                 )
+
+                 if do_classifier_free_guidance:
+                     single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
+                     single_image_embeds = single_image_embeds.to(device)
+
+                 image_embeds.append(single_image_embeds)
+         else:
+             repeat_dims = [1]
+             image_embeds = []
+             for single_image_embeds in ip_adapter_image_embeds:
+                 if do_classifier_free_guidance:
+                     single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
+                     single_image_embeds = single_image_embeds.repeat(
+                         num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
+                     )
+                     single_negative_image_embeds = single_negative_image_embeds.repeat(
+                         num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
+                     )
+                     single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
+                 else:
+                     single_image_embeds = single_image_embeds.repeat(
+                         num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
+                     )
+                 image_embeds.append(single_image_embeds)
+
+         return image_embeds
+
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
+     def run_safety_checker(self, image, device, dtype):
+         if self.safety_checker is None:
+             has_nsfw_concept = None
+         else:
+             if torch.is_tensor(image):
+                 feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
+             else:
+                 feature_extractor_input = self.image_processor.numpy_to_pil(image)
+             safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
+             image, has_nsfw_concept = self.safety_checker(
+                 images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
+             )
+         return image, has_nsfw_concept
+
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents
+     def decode_latents(self, latents):
+         deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
+         deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
+
+         latents = 1 / self.vae.config.scaling_factor * latents
+         image = self.vae.decode(latents, return_dict=False)[0]
+         image = (image / 2 + 0.5).clamp(0, 1)
+         # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
+         image = image.cpu().permute(0, 2, 3, 1).float().numpy()
+         return image
+
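+     # Numeric sketch (assuming an SD 1.x VAE, where config.scaling_factor is
+     # 0.18215): the rescale above multiplies latents by 1 / 0.18215, roughly
+     # 5.49, before decoding, and `image / 2 + 0.5` maps the decoder's [-1, 1]
+     # output into [0, 1] before conversion to numpy.
+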
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
+     def prepare_extra_step_kwargs(self, generator, eta):
+         # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
+         # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
+         # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
+         # and should be between [0, 1]
+
+         accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         extra_step_kwargs = {}
+         if accepts_eta:
+             extra_step_kwargs["eta"] = eta
+
+         # check if the scheduler accepts generator
+         accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
+         if accepts_generator:
+             extra_step_kwargs["generator"] = generator
+         return extra_step_kwargs
+
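+     # The introspection pattern above in isolation, as a minimal sketch
+     # (`filter_step_kwargs` is an illustrative name, not part of this file):
+     #
+     #     import inspect
+     #
+     #     def filter_step_kwargs(step_fn, **candidates):
+     #         accepted = set(inspect.signature(step_fn).parameters.keys())
+     #         return {k: v for k, v in candidates.items() if k in accepted}
+     #
+     #     # filter_step_kwargs(scheduler.step, eta=0.0, generator=None)
+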
+     def check_inputs(
+         self,
+         prompt,
+         image,
+         callback_steps,
+         negative_prompt=None,
+         prompt_embeds=None,
+         negative_prompt_embeds=None,
+         ip_adapter_image=None,
+         ip_adapter_image_embeds=None,
+         controlnet_conditioning_scale=1.0,
+         control_guidance_start=0.0,
+         control_guidance_end=1.0,
+         callback_on_step_end_tensor_inputs=None,
+     ):
+         if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
+             raise ValueError(
+                 f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
+                 f" {type(callback_steps)}."
+             )
+
+         if callback_on_step_end_tensor_inputs is not None and not all(
+             k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
+         ):
+             raise ValueError(
+                 f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
+             )
+
+         if prompt is not None and prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
+                 " only forward one of the two."
+             )
+         elif prompt is None and prompt_embeds is None:
+             raise ValueError(
+                 "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
+             )
+         elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
+             raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
+
+         if negative_prompt is not None and negative_prompt_embeds is not None:
+             raise ValueError(
+                 f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
+                 f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
+             )
+
+         if prompt_embeds is not None and negative_prompt_embeds is not None:
+             if prompt_embeds.shape != negative_prompt_embeds.shape:
+                 raise ValueError(
+                     "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
+                     f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
+                     f" {negative_prompt_embeds.shape}."
+                 )
+
+         # Check `image`
+         is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
+             self.controlnet, torch._dynamo.eval_frame.OptimizedModule
+         )
+         if (
+             isinstance(self.controlnet, ControlNetModel)
+             or is_compiled
+             and isinstance(self.controlnet._orig_mod, ControlNetModel)
+         ):
+             self.check_image(image, prompt, prompt_embeds)
+         elif (
+             isinstance(self.controlnet, MultiControlNetModel)
+             or is_compiled
+             and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
+         ):
+             if not isinstance(image, list):
+                 raise TypeError("For multiple controlnets: `image` must be type `list`")
+
+             # When `image` is a nested list:
+             # (e.g. [[canny_image_1, pose_image_1], [canny_image_2, pose_image_2]])
+             elif any(isinstance(i, list) for i in image):
+                 transposed_image = [list(t) for t in zip(*image)]
+                 if len(transposed_image) != len(self.controlnet.nets):
+                     raise ValueError(
+                         f"For multiple controlnets: if you pass `image` as a list of lists, each sublist must have the same length as the number of controlnets, but the sublists in `image` got {len(transposed_image)} images and {len(self.controlnet.nets)} ControlNets."
+                     )
+                 for image_ in transposed_image:
+                     self.check_image(image_, prompt, prompt_embeds)
+             elif len(image) != len(self.controlnet.nets):
+                 raise ValueError(
+                     f"For multiple controlnets: `image` must have the same length as the number of controlnets, but got {len(image)} images and {len(self.controlnet.nets)} ControlNets."
+                 )
+             else:
+                 for image_ in image:
+                     self.check_image(image_, prompt, prompt_embeds)
+         else:
+             assert False
+
+         # Check `controlnet_conditioning_scale`
+         if (
+             isinstance(self.controlnet, ControlNetModel)
+             or is_compiled
+             and isinstance(self.controlnet._orig_mod, ControlNetModel)
+         ):
+             if not isinstance(controlnet_conditioning_scale, float):
+                 raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
+         elif (
+             isinstance(self.controlnet, MultiControlNetModel)
+             or is_compiled
+             and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
+         ):
+             if isinstance(controlnet_conditioning_scale, list):
+                 if any(isinstance(i, list) for i in controlnet_conditioning_scale):
+                     raise ValueError(
+                         "A single batch of varying conditioning scale settings (e.g. [[1.0, 0.5], [0.2, 0.8]]) is not supported at the moment. "
+                         "The conditioning scale must be fixed across the batch."
+                     )
+             elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
+                 self.controlnet.nets
+             ):
+                 raise ValueError(
+                     "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
+                     " the same length as the number of controlnets"
+                 )
+         else:
+             assert False
+
+         if not isinstance(control_guidance_start, (tuple, list)):
+             control_guidance_start = [control_guidance_start]
+
+         if not isinstance(control_guidance_end, (tuple, list)):
+             control_guidance_end = [control_guidance_end]
+
+         if len(control_guidance_start) != len(control_guidance_end):
+             raise ValueError(
+                 f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
+             )
+
+         if isinstance(self.controlnet, MultiControlNetModel):
+             if len(control_guidance_start) != len(self.controlnet.nets):
+                 raise ValueError(
+                     f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
+                 )
+
+         for start, end in zip(control_guidance_start, control_guidance_end):
+             if start >= end:
+                 raise ValueError(
+                     f"control guidance start: {start} cannot be larger than or equal to control guidance end: {end}."
+                 )
+             if start < 0.0:
+                 raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
+             if end > 1.0:
+                 raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
+
+         if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
+             raise ValueError(
+                 "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
+             )
+
+         if ip_adapter_image_embeds is not None:
+             if not isinstance(ip_adapter_image_embeds, list):
+                 raise ValueError(
+                     f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
+                 )
+             elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
+                 raise ValueError(
+                     f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
+                 )
+
+     def check_image(self, image, prompt, prompt_embeds):
+         image_is_pil = isinstance(image, PIL.Image.Image)
+         image_is_tensor = isinstance(image, torch.Tensor)
+         image_is_np = isinstance(image, np.ndarray)
+         image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
+         image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
+         image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
+
+         if (
+             not image_is_pil
+             and not image_is_tensor
+             and not image_is_np
+             and not image_is_pil_list
+             and not image_is_tensor_list
+             and not image_is_np_list
+         ):
+             raise TypeError(
+                 f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
+             )
+
+         if image_is_pil:
+             image_batch_size = 1
+         else:
+             image_batch_size = len(image)
+
+         if prompt is not None and isinstance(prompt, str):
+             prompt_batch_size = 1
+         elif prompt is not None and isinstance(prompt, list):
+             prompt_batch_size = len(prompt)
+         elif prompt_embeds is not None:
+             prompt_batch_size = prompt_embeds.shape[0]
+
+         if image_batch_size != 1 and image_batch_size != prompt_batch_size:
+             raise ValueError(
+                 f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
+             )
+
+     def prepare_image(
+         self,
+         image,
+         width,
+         height,
+         batch_size,
+         num_images_per_prompt,
+         device,
+         dtype,
+         do_classifier_free_guidance=False,
+         guess_mode=False,
+     ):
+         image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
+         image_batch_size = image.shape[0]
+
+         if image_batch_size == 1:
+             repeat_by = batch_size
+         else:
+             # image batch size is the same as prompt batch size
+             repeat_by = num_images_per_prompt
+
+         image = image.repeat_interleave(repeat_by, dim=0)
+
+         image = image.to(device=device, dtype=dtype)
+
+         if do_classifier_free_guidance and not guess_mode:
+             image = torch.cat([image] * 2)
+
+         return image
+
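+     # Shape sketch (illustrative values): one 512x512 condition image with
+     # batch_size=2, num_images_per_prompt=1 and classifier-free guidance on
+     # becomes (2, 3, 512, 512) after repeat_interleave and (4, 3, 512, 512)
+     # after torch.cat([image] * 2).
+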
+     # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents
+     def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
+         shape = (
+             batch_size,
+             num_channels_latents,
+             int(height) // self.vae_scale_factor,
+             int(width) // self.vae_scale_factor,
+         )
+         if isinstance(generator, list) and len(generator) != batch_size:
+             raise ValueError(
+                 f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
+                 f" size of {batch_size}. Make sure the batch size matches the length of the generators."
+             )
+
+         if latents is None:
+             latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
+         else:
+             latents = latents.to(device)
+
+         # scale the initial noise by the standard deviation required by the scheduler
+         latents = latents * self.scheduler.init_noise_sigma
+         return latents
+
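+     # Shape sketch (illustrative): a 512x512 generation with the usual
+     # vae_scale_factor of 8 and num_channels_latents=4 gives
+     # shape == (batch_size, 4, 64, 64); for DDIM-style schedulers
+     # init_noise_sigma is 1.0, so the scaling above is a no-op there.
+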
+     # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
+     def get_guidance_scale_embedding(
+         self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
+     ) -> torch.Tensor:
+         """
+         See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
+
+         Args:
+             w (`torch.Tensor`):
+                 Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
+             embedding_dim (`int`, *optional*, defaults to 512):
+                 Dimension of the embeddings to generate.
+             dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
+                 Data type of the generated embeddings.
+
+         Returns:
+             `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
+         """
+         assert len(w.shape) == 1
+         w = w * 1000.0
+
+         half_dim = embedding_dim // 2
+         emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
+         emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
+         emb = w.to(dtype)[:, None] * emb[None, :]
+         emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
+         if embedding_dim % 2 == 1:  # zero pad
+             emb = torch.nn.functional.pad(emb, (0, 1))
+         assert emb.shape == (w.shape[0], embedding_dim)
+         return emb
+
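+     # Usage sketch (illustrative): the caller below passes w = guidance_scale - 1,
+     # so for guidance_scale = 7.5,
+     #
+     #     w = torch.tensor([6.5])
+     #     emb = self.get_guidance_scale_embedding(w, embedding_dim=256)
+     #     emb.shape  # torch.Size([1, 256]), sin half then cos half
+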
+     @property
+     def guidance_scale(self):
+         return self._guidance_scale
+
+     @property
+     def clip_skip(self):
+         return self._clip_skip
+
+     # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
+     # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
+     # corresponds to doing no classifier free guidance.
+     @property
+     def do_classifier_free_guidance(self):
+         return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
+
+     @property
+     def cross_attention_kwargs(self):
+         return self._cross_attention_kwargs
+
+     @property
+     def num_timesteps(self):
+         return self._num_timesteps
+
+     @torch.no_grad()
+     @replace_example_docstring(EXAMPLE_DOC_STRING)
+     def __call__(
+         self,
+         prompt: Union[str, List[str]] = None,
+         image: PipelineImageInput = None,
+         height: Optional[int] = None,
+         width: Optional[int] = None,
+         num_inference_steps: int = 50,
+         timesteps: List[int] = None,
+         sigmas: List[float] = None,
+         guidance_scale: float = 7.5,
+         negative_prompt: Optional[Union[str, List[str]]] = None,
+         num_images_per_prompt: Optional[int] = 1,
+         eta: float = 0.0,
+         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
+         latents: Optional[torch.Tensor] = None,
+         prompt_embeds: Optional[torch.Tensor] = None,
+         negative_prompt_embeds: Optional[torch.Tensor] = None,
+         ip_adapter_image: Optional[PipelineImageInput] = None,
+         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
+         output_type: Optional[str] = "pil",
+         return_dict: bool = True,
+         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
+         controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
+         guess_mode: bool = False,
+         control_guidance_start: Union[float, List[float]] = 0.0,
+         control_guidance_end: Union[float, List[float]] = 1.0,
+         clip_skip: Optional[int] = None,
+         callback_on_step_end: Optional[
+             Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
+         ] = None,
+         callback_on_step_end_tensor_inputs: List[str] = ["latents"],
+         **kwargs,
+     ):
+         r"""
+         The call function to the pipeline for generation.
+
+         Args:
+             prompt (`str` or `List[str]`, *optional*):
+                 The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
+             image (`torch.Tensor`, `PIL.Image.Image`, `np.ndarray`, `List[torch.Tensor]`, `List[PIL.Image.Image]`,
+                 `List[np.ndarray]`, `List[List[torch.Tensor]]`, `List[List[np.ndarray]]` or `List[List[PIL.Image.Image]]`):
+                 The ControlNet input condition to provide guidance to the `unet` for generation. If the type is
+                 specified as `torch.Tensor`, it is passed to ControlNet as is. `PIL.Image.Image` can also be accepted
+                 as an image. The dimensions of the output image default to `image`'s dimensions. If height and/or
+                 width are passed, `image` is resized accordingly. If multiple ControlNets are specified in `init`,
+                 images must be passed as a list such that each element of the list can be correctly batched for input
+                 to a single ControlNet. When `prompt` is a list, and if a list of images is passed for a single
+                 ControlNet, each will be paired with each prompt in the `prompt` list. This also applies to multiple
+                 ControlNets, where a list of image lists can be passed to batch for each prompt and each ControlNet.
+             height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                 The height in pixels of the generated image.
+             width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
+                 The width in pixels of the generated image.
+             num_inference_steps (`int`, *optional*, defaults to 50):
+                 The number of denoising steps. More denoising steps usually lead to a higher quality image at the
+                 expense of slower inference.
+             timesteps (`List[int]`, *optional*):
+                 Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
+                 in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
+                 passed will be used. Must be in descending order.
+             sigmas (`List[float]`, *optional*):
+                 Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
+                 their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
+                 will be used.
+             guidance_scale (`float`, *optional*, defaults to 7.5):
+                 A higher guidance scale value encourages the model to generate images closely linked to the text
+                 `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
+             negative_prompt (`str` or `List[str]`, *optional*):
+                 The prompt or prompts to guide what to not include in image generation. If not defined, you need to
+                 pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale <= 1`).
+             num_images_per_prompt (`int`, *optional*, defaults to 1):
+                 The number of images to generate per prompt.
+             eta (`float`, *optional*, defaults to 0.0):
+                 Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
+                 to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
+             generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
+                 A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
+                 generation deterministic.
+             latents (`torch.Tensor`, *optional*):
+                 Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
+                 generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
+                 tensor is generated by sampling using the supplied random `generator`.
+             prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
+                 provided, text embeddings are generated from the `prompt` input argument.
+             negative_prompt_embeds (`torch.Tensor`, *optional*):
+                 Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
+                 not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
+             ip_adapter_image (`PipelineImageInput`, *optional*):
+                 Optional image input to work with IP Adapters.
+             ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
+                 Pre-generated image embeddings for IP-Adapter. It should be a list with the same length as the number
+                 of IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
+                 contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
+                 provided, embeddings are computed from the `ip_adapter_image` input argument.
+             output_type (`str`, *optional*, defaults to `"pil"`):
+                 The output format of the generated image. Choose between `PIL.Image` or `np.array`.
+             return_dict (`bool`, *optional*, defaults to `True`):
+                 Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
+                 plain tuple.
+             callback (`Callable`, *optional*):
+                 A function that is called every `callback_steps` steps during inference. The function is called with
+                 the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`.
+             callback_steps (`int`, *optional*, defaults to 1):
+                 The frequency at which the `callback` function is called. If not specified, the callback is called at
+                 every step.
+             cross_attention_kwargs (`dict`, *optional*):
+                 A kwargs dictionary that, if specified, is passed along to the [`AttentionProcessor`] as defined in
+                 [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
+             controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
+                 The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
+                 to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
+                 the corresponding scale as a list.
+             guess_mode (`bool`, *optional*, defaults to `False`):
+                 The ControlNet encoder tries to recognize the content of the input image even if you remove all
+                 prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
+             control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
+                 The percentage of total steps at which the ControlNet starts applying.
+             control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
+                 The percentage of total steps at which the ControlNet stops applying.
+             clip_skip (`int`, *optional*):
+                 Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
+                 the output of the pre-final layer will be used for computing the prompt embeddings.
+             callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
+                 A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
+                 each denoising step during inference, with the following arguments: `callback_on_step_end(self:
+                 DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
+                 list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
+             callback_on_step_end_tensor_inputs (`List`, *optional*):
+                 The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
+                 will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
+                 `._callback_tensor_inputs` attribute of your pipeline class.
+
+         Examples:
+
+         Returns:
+             [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
+                 If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
+                 otherwise a `tuple` is returned where the first element is a list with the generated images and the
+                 second element is a list of `bool`s indicating whether the corresponding generated image contains
+                 "not-safe-for-work" (nsfw) content.
+         """
+
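+         # End-to-end usage sketch for the upstream diffusers ControlNet
+         # pipeline this method mirrors (checkpoint names and the condition
+         # image are illustrative assumptions, not part of this file):
+         #
+         #     from diffusers import ControlNetModel, StableDiffusionControlNetPipeline
+         #     from diffusers.utils import load_image
+         #
+         #     controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
+         #     pipe = StableDiffusionControlNetPipeline.from_pretrained(
+         #         "runwayml/stable-diffusion-v1-5", controlnet=controlnet
+         #     )
+         #     cond = load_image("canny_edges.png")
+         #     out = pipe("a photo of a cat", image=cond, num_inference_steps=20)
+         #     out.images[0].save("result.png")
+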
+         callback = kwargs.pop("callback", None)
+         callback_steps = kwargs.pop("callback_steps", None)
+
+         if callback is not None:
+             deprecate(
+                 "callback",
+                 "1.0.0",
+                 "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+             )
+         if callback_steps is not None:
+             deprecate(
+                 "callback_steps",
+                 "1.0.0",
+                 "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
+             )
+
+         if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
+             callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
+
+         controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
+
+         # align format for control guidance
+         if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
+             control_guidance_start = len(control_guidance_end) * [control_guidance_start]
+         elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
+             control_guidance_end = len(control_guidance_start) * [control_guidance_end]
+         elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
+             mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
+             control_guidance_start, control_guidance_end = (
+                 mult * [control_guidance_start],
+                 mult * [control_guidance_end],
+             )
+
+         # 1. Check inputs. Raise error if not correct
+         self.check_inputs(
+             prompt,
+             image,
+             callback_steps,
+             negative_prompt,
+             prompt_embeds,
+             negative_prompt_embeds,
+             ip_adapter_image,
+             ip_adapter_image_embeds,
+             controlnet_conditioning_scale,
+             control_guidance_start,
+             control_guidance_end,
+             callback_on_step_end_tensor_inputs,
+         )
+
+         self._guidance_scale = guidance_scale
+         self._clip_skip = clip_skip
+         self._cross_attention_kwargs = cross_attention_kwargs
+
+         # 2. Define call parameters
+         if prompt is not None and isinstance(prompt, str):
+             batch_size = 1
+         elif prompt is not None and isinstance(prompt, list):
+             batch_size = len(prompt)
+         else:
+             batch_size = prompt_embeds.shape[0]
+
+         device = self._execution_device
+
+         if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
+             controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
+
+         global_pool_conditions = (
+             controlnet.config.global_pool_conditions
+             if isinstance(controlnet, ControlNetModel)
+             else controlnet.nets[0].config.global_pool_conditions
+         )
+         guess_mode = guess_mode or global_pool_conditions
+
+         # 3. Encode input prompt
+         text_encoder_lora_scale = (
+             self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
+         )
+         prompt_embeds, negative_prompt_embeds = self.encode_prompt(
+             prompt,
+             device,
+             num_images_per_prompt,
+             self.do_classifier_free_guidance,
+             negative_prompt,
+             prompt_embeds=prompt_embeds,
+             negative_prompt_embeds=negative_prompt_embeds,
+             lora_scale=text_encoder_lora_scale,
+             clip_skip=self.clip_skip,
+         )
+         # For classifier free guidance, we need to do two forward passes.
+         # Here we concatenate the unconditional and text embeddings into a single batch
+         # to avoid doing two forward passes
+         if self.do_classifier_free_guidance:
+             prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
+
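+         # Batch layout sketch: with CFG, prompt_embeds is now stacked as
+         # [uncond_0 ... uncond_{B-1}, cond_0 ... cond_{B-1}]; the denoising loop
+         # below splits the UNet output with .chunk(2) and recombines it as
+         #     uncond + guidance_scale * (cond - uncond)
+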
+         if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
+             image_embeds = self.prepare_ip_adapter_image_embeds(
+                 ip_adapter_image,
+                 ip_adapter_image_embeds,
+                 device,
+                 batch_size * num_images_per_prompt,
+                 self.do_classifier_free_guidance,
+             )
+
+         # 4. Prepare image
+         if isinstance(controlnet, ControlNetModel):
+             image = self.prepare_image(
+                 image=image,
+                 width=width,
+                 height=height,
+                 batch_size=batch_size * num_images_per_prompt,
+                 num_images_per_prompt=num_images_per_prompt,
+                 device=device,
+                 dtype=controlnet.dtype,
+                 do_classifier_free_guidance=self.do_classifier_free_guidance,
+                 guess_mode=guess_mode,
+             )
+             height, width = image.shape[-2:]
+         elif isinstance(controlnet, MultiControlNetModel):
+             images = []
+
+             # Nested lists as ControlNet condition
+             if isinstance(image[0], list):
+                 # Transpose the nested image list
+                 image = [list(t) for t in zip(*image)]
+
+             for image_ in image:
+                 image_ = self.prepare_image(
+                     image=image_,
+                     width=width,
+                     height=height,
+                     batch_size=batch_size * num_images_per_prompt,
+                     num_images_per_prompt=num_images_per_prompt,
+                     device=device,
+                     dtype=controlnet.dtype,
+                     do_classifier_free_guidance=self.do_classifier_free_guidance,
+                     guess_mode=guess_mode,
+                 )
+
+                 images.append(image_)
+
+             image = images
+             height, width = image[0].shape[-2:]
+         else:
+             assert False
+
+         # 5. Prepare timesteps
+         timesteps, num_inference_steps = retrieve_timesteps(
+             self.scheduler, num_inference_steps, device, timesteps, sigmas
+         )
+         self._num_timesteps = len(timesteps)
+
+         # 6. Prepare latent variables
+         num_channels_latents = self.unet.config.in_channels
+         latents = self.prepare_latents(
+             batch_size * num_images_per_prompt,
+             num_channels_latents,
+             height,
+             width,
+             prompt_embeds.dtype,
+             device,
+             generator,
+             latents,
+         )
+
+         # 6.5 Optionally get Guidance Scale Embedding
+         timestep_cond = None
+         if self.unet.config.time_cond_proj_dim is not None:
+             guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
+             timestep_cond = self.get_guidance_scale_embedding(
+                 guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
+             ).to(device=device, dtype=latents.dtype)
+
+         # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
+         extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
+
+         # 7.1 Add image embeds for IP-Adapter
+         added_cond_kwargs = (
+             {"image_embeds": image_embeds}
+             if ip_adapter_image is not None or ip_adapter_image_embeds is not None
+             else None
+         )
+
+         # 7.2 Create tensor stating which controlnets to keep
+         controlnet_keep = []
+         for i in range(len(timesteps)):
+             keeps = [
+                 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
+                 for s, e in zip(control_guidance_start, control_guidance_end)
+             ]
+             controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
+
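+         # Worked example (illustrative): with 4 timesteps, a single ControlNet,
+         # control_guidance_start=0.0 and control_guidance_end=0.5, the mask is
+         # [1.0, 1.0, 0.0, 0.0]; step i is kept iff i/N >= start and (i+1)/N <= end.
+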
+         # 8. Denoising loop
+         num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
+         is_unet_compiled = is_compiled_module(self.unet)
+         is_controlnet_compiled = is_compiled_module(self.controlnet)
+         is_torch_higher_equal_2_1 = is_torch_version(">=", "2.1")
+         with self.progress_bar(total=num_inference_steps) as progress_bar:
+             for i, t in enumerate(timesteps):
+                 # Relevant thread:
+                 # https://dev-discuss.pytorch.org/t/cudagraphs-in-pytorch-2-0/1428
+                 if (is_unet_compiled and is_controlnet_compiled) and is_torch_higher_equal_2_1:
+                     torch._inductor.cudagraph_mark_step_begin()
+                 # expand the latents if we are doing classifier free guidance
+                 latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
+                 latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
+
+                 # controlnet(s) inference
+                 if guess_mode and self.do_classifier_free_guidance:
+                     # Infer ControlNet only for the conditional batch.
+                     control_model_input = latents
+                     control_model_input = self.scheduler.scale_model_input(control_model_input, t)
+                     controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
+                 else:
+                     control_model_input = latent_model_input
+                     controlnet_prompt_embeds = prompt_embeds
+
+                 if isinstance(controlnet_keep[i], list):
+                     cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
+                 else:
+                     controlnet_cond_scale = controlnet_conditioning_scale
+                     if isinstance(controlnet_cond_scale, list):
+                         controlnet_cond_scale = controlnet_cond_scale[0]
+                     cond_scale = controlnet_cond_scale * controlnet_keep[i]
+
+                 down_block_res_samples, mid_block_res_sample = self.controlnet(
+                     control_model_input,
+                     t,
+                     encoder_hidden_states=controlnet_prompt_embeds,
+                     controlnet_cond=image,
+                     conditioning_scale=cond_scale,
+                     guess_mode=guess_mode,
+                     return_dict=False,
+                 )
+
+                 if guess_mode and self.do_classifier_free_guidance:
+                     # ControlNet was inferred only for the conditional batch.
+                     # To apply the output of ControlNet to both the unconditional and conditional batches,
+                     # add 0 to the unconditional batch to keep it unchanged.
+                     down_block_res_samples = [torch.cat([torch.zeros_like(d), d]) for d in down_block_res_samples]
+                     mid_block_res_sample = torch.cat([torch.zeros_like(mid_block_res_sample), mid_block_res_sample])
+
+                 # predict the noise residual
+                 noise_pred = self.unet(
+                     latent_model_input,
+                     t,
+                     encoder_hidden_states=prompt_embeds,
+                     timestep_cond=timestep_cond,
+                     cross_attention_kwargs=self.cross_attention_kwargs,
+                     down_block_additional_residuals=down_block_res_samples,
+                     mid_block_additional_residual=mid_block_res_sample,
+                     added_cond_kwargs=added_cond_kwargs,
+                     return_dict=False,
+                 )[0]
+
+                 # perform guidance
+                 if self.do_classifier_free_guidance:
+                     noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
+                     noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
+
+                 # compute the previous noisy sample x_t -> x_t-1
+                 latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
+
+                 if callback_on_step_end is not None:
+                     callback_kwargs = {}
+                     for k in callback_on_step_end_tensor_inputs:
+                         callback_kwargs[k] = locals()[k]
+                     callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
+
+                     latents = callback_outputs.pop("latents", latents)
+                     prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
+                     negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
+
+                 # call the callback, if provided
+                 if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
+                     progress_bar.update()
+                     if callback is not None and i % callback_steps == 0:
+                         step_idx = i // getattr(self.scheduler, "order", 1)
+                         callback(step_idx, t, latents)
+
+         # If we do sequential model offloading, let's offload unet and controlnet
+         # manually for max memory savings
+         if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
+             self.unet.to("cpu")
+             self.controlnet.to("cpu")
+             torch.cuda.empty_cache()
+
+
1332
+ if not output_type == "latent":
1333
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1334
+ 0
1335
+ ]
1336
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1337
+ else:
1338
+ image = latents
1339
+ has_nsfw_concept = None
1340
+
1341
+ if has_nsfw_concept is None:
1342
+ do_denormalize = [True] * image.shape[0]
1343
+ else:
1344
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1345
+
1346
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1347
+
1348
+ # Offload all models
1349
+ self.maybe_free_model_hooks()
1350
+
1351
+ if not return_dict:
1352
+ return (image, has_nsfw_concept)
1353
+
1354
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)