SunderAli17 committed
Commit c71b45e
Parent: 1295a0c

Create unet/unet_2d_ZeroSFT.py

Files changed (1)
  1. module/unet/unet_2d_ZeroSFT.py +1377 -0
module/unet/unet_2d_ZeroSFT.py ADDED
@@ -0,0 +1,1377 @@
# Copied from diffusers.models.unets.unet_2d_condition.py

# Copyright 2024 The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import torch
import torch.nn as nn
import torch.utils.checkpoint

from diffusers.configuration_utils import ConfigMixin, register_to_config
from diffusers.loaders import PeftAdapterMixin, UNet2DConditionLoadersMixin
from diffusers.utils import USE_PEFT_BACKEND, BaseOutput, deprecate, logging, scale_lora_layers, unscale_lora_layers
from diffusers.models.activations import get_activation
from diffusers.models.attention_processor import (
    ADDED_KV_ATTENTION_PROCESSORS,
    CROSS_ATTENTION_PROCESSORS,
    Attention,
    AttentionProcessor,
    AttnAddedKVProcessor,
    AttnProcessor,
)
from diffusers.models.embeddings import (
    GaussianFourierProjection,
    GLIGENTextBoundingboxProjection,
    ImageHintTimeEmbedding,
    ImageProjection,
    ImageTimeEmbedding,
    TextImageProjection,
    TextImageTimeEmbedding,
    TextTimeEmbedding,
    TimestepEmbedding,
    Timesteps,
)
from diffusers.models.modeling_utils import ModelMixin
from .unet_2d_ZeroSFT_blocks import (
    get_down_block,
    get_mid_block,
    get_up_block,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name


def zero_module(module):
    for p in module.parameters():
        nn.init.zeros_(p)
    return module
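
# `zero_module` zero-initializes every parameter of a layer (the ControlNet
# trick): a branch wrapped this way is an exact no-op at the start of
# fine-tuning, so adding its output leaves the pretrained features unchanged,
# e.g. `h + zero_conv(c) == h` at initialization for any control input `c`.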


class ZeroConv(nn.Module):
    def __init__(self, label_nc, norm_nc, mask=False):
        super().__init__()
        self.zero_conv = zero_module(nn.Conv2d(label_nc, norm_nc, 1, 1, 0))
        self.mask = mask

    def forward(self, c, h, h_ori=None):
        # with torch.cuda.amp.autocast(enabled=False, dtype=torch.float32):
        if not self.mask:
            h = h + self.zero_conv(c)
        else:
            h = h + self.zero_conv(c) * torch.zeros_like(h)
        if h_ori is not None:
            h = torch.cat([h_ori, h], dim=1)
        return h
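
# Note: with `mask=True` the ZeroConv contribution is multiplied by
# `torch.zeros_like(h)`, so `h` passes through numerically unchanged while the
# zero_conv parameters still participate in the autograd graph; this appears
# intended as a switch for disabling control without changing module structure.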


class ZeroSFT(nn.Module):
    def __init__(self, label_nc, norm_nc, concat_channels=0, norm=True, mask=False):
        super().__init__()

        # param_free_norm_type = str(parsed.group(1))
        ks = 3
        pw = ks // 2

        self.mask = mask
        self.norm = norm
        self.pre_concat = bool(concat_channels != 0)
        if self.norm:
            self.param_free_norm = torch.nn.GroupNorm(num_groups=32, num_channels=norm_nc + concat_channels)
        else:
            self.param_free_norm = nn.Identity()

        nhidden = 128

        self.mlp_shared = nn.Sequential(
            nn.Conv2d(label_nc, nhidden, kernel_size=ks, padding=pw),
            nn.SiLU()
        )
        self.zero_mul = zero_module(nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw))
        self.zero_add = zero_module(nn.Conv2d(nhidden, norm_nc + concat_channels, kernel_size=ks, padding=pw))

        self.zero_conv = zero_module(nn.Conv2d(label_nc, norm_nc, 1, 1, 0))

    def forward(self, down_block_res_samples, h_ori=None, control_scale=1.0, mask=False):
        mask = mask or self.mask
        assert mask is False
        if self.pre_concat:
            assert h_ori is not None

        c, h = down_block_res_samples
        if h_ori is not None:
            h_raw = torch.cat([h_ori, h], dim=1)
        else:
            h_raw = h

        if self.mask:
            h = h + self.zero_conv(c) * torch.zeros_like(h)
        else:
            h = h + self.zero_conv(c)
        if h_ori is not None and self.pre_concat:
            h = torch.cat([h_ori, h], dim=1)
        actv = self.mlp_shared(c)
        gamma = self.zero_mul(actv)
        beta = self.zero_add(actv)
        if self.mask:
            gamma = gamma * torch.zeros_like(gamma)
            beta = beta * torch.zeros_like(beta)
        # h = h + self.param_free_norm(h) * gamma + beta
        h = self.param_free_norm(h) * (gamma + 1) + beta
        if h_ori is not None and not self.pre_concat:
            h = torch.cat([h_ori, h], dim=1)
        return h * control_scale + h_raw * (1 - control_scale)
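
# ZeroSFT is a SPADE-style spatial feature transform: control features `c` are
# mapped to per-pixel scale (`gamma`) and shift (`beta`) maps and the base
# features are modulated as `norm(h) * (1 + gamma) + beta`. Because `zero_mul`,
# `zero_add`, and `zero_conv` are zero-initialized, `gamma == beta == 0` at the
# start of training, so the control input initially contributes nothing beyond
# the parameter-free GroupNorm; `control_scale` then linearly blends the
# modulated features with the raw skip features. A sketch with hypothetical
# shapes:
#     sft = ZeroSFT(label_nc=320, norm_nc=320)
#     c = h = torch.randn(2, 320, 64, 64)
#     out = sft((c, h))  # -> (2, 320, 64, 64)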


@dataclass
class UNet2DConditionOutput(BaseOutput):
    """
    The output of [`UNet2DConditionModel`].

    Args:
        sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
            The hidden states output conditioned on `encoder_hidden_states` input. Output of last layer of model.
    """

    sample: torch.FloatTensor = None


class UNet2DZeroSFTModel(ModelMixin, ConfigMixin, UNet2DConditionLoadersMixin, PeftAdapterMixin):
    r"""
    A conditional 2D UNet model that takes a noisy sample, conditional state, and a timestep and returns a sample
    shaped output.

    This model inherits from [`ModelMixin`]. Check the superclass documentation for its generic methods implemented
    for all models (such as downloading or saving).

    Parameters:
        sample_size (`int` or `Tuple[int, int]`, *optional*, defaults to `None`):
            Height and width of input/output sample.
        in_channels (`int`, *optional*, defaults to 4): Number of channels in the input sample.
        out_channels (`int`, *optional*, defaults to 4): Number of channels in the output.
        center_input_sample (`bool`, *optional*, defaults to `False`): Whether to center the input sample.
        flip_sin_to_cos (`bool`, *optional*, defaults to `True`):
            Whether to flip the sin to cos in the time embedding.
        freq_shift (`int`, *optional*, defaults to 0): The frequency shift to apply to the time embedding.
        down_block_types (`Tuple[str]`, *optional*, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`):
            The tuple of downsample blocks to use.
        mid_block_type (`str`, *optional*, defaults to `"UNetMidBlock2DCrossAttn"`):
            Block type for the middle of the UNet, it can be one of `UNetMidBlock2DCrossAttn`, `UNetMidBlock2D`, or
            `UNetMidBlock2DSimpleCrossAttn`. If `None`, the mid block layer is skipped.
        up_block_types (`Tuple[str]`, *optional*, defaults to `("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D")`):
            The tuple of upsample blocks to use.
        only_cross_attention (`bool` or `Tuple[bool]`, *optional*, defaults to `False`):
            Whether to include self-attention in the basic transformer blocks, see
            [`~models.attention.BasicTransformerBlock`].
        block_out_channels (`Tuple[int]`, *optional*, defaults to `(320, 640, 1280, 1280)`):
            The tuple of output channels for each block.
        layers_per_block (`int`, *optional*, defaults to 2): The number of layers per block.
        downsample_padding (`int`, *optional*, defaults to 1): The padding to use for the downsampling convolution.
        mid_block_scale_factor (`float`, *optional*, defaults to 1.0): The scale factor to use for the mid block.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        act_fn (`str`, *optional*, defaults to `"silu"`): The activation function to use.
        norm_num_groups (`int`, *optional*, defaults to 32): The number of groups to use for the normalization.
            If `None`, normalization and activation layers are skipped in post-processing.
        norm_eps (`float`, *optional*, defaults to 1e-5): The epsilon to use for the normalization.
        cross_attention_dim (`int` or `Tuple[int]`, *optional*, defaults to 1280):
            The dimension of the cross attention features.
        transformer_layers_per_block (`int`, `Tuple[int]`, or `Tuple[Tuple]`, *optional*, defaults to 1):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. Only relevant for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        reverse_transformer_layers_per_block (`Tuple[Tuple]`, *optional*, defaults to `None`):
            The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`], in the upsampling
            blocks of the U-Net. Only relevant if `transformer_layers_per_block` is of type `Tuple[Tuple]` and for
            [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`],
            [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`].
        encoder_hid_dim (`int`, *optional*, defaults to `None`):
            If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim`
            dimension to `cross_attention_dim`.
        encoder_hid_dim_type (`str`, *optional*, defaults to `None`):
            If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text
            embeddings of dimension `cross_attention_dim` according to `encoder_hid_dim_type`.
        attention_head_dim (`int`, *optional*, defaults to 8): The dimension of the attention heads.
        num_attention_heads (`int`, *optional*):
            The number of attention heads. If not defined, defaults to `attention_head_dim`.
        resnet_time_scale_shift (`str`, *optional*, defaults to `"default"`): Time scale shift config
            for ResNet blocks (see [`~models.resnet.ResnetBlock2D`]). Choose from `default` or `scale_shift`.
        class_embed_type (`str`, *optional*, defaults to `None`):
            The type of class embedding to use which is ultimately summed with the time embeddings. Choose from `None`,
            `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`.
        addition_embed_type (`str`, *optional*, defaults to `None`):
            Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or
            `"text"`. `"text"` will use the `TextTimeEmbedding` layer.
        addition_time_embed_dim (`int`, *optional*, defaults to `None`):
            Dimension for the timestep embeddings.
        num_class_embeds (`int`, *optional*, defaults to `None`):
            Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing
            class conditioning with `class_embed_type` equal to `None`.
        time_embedding_type (`str`, *optional*, defaults to `positional`):
            The type of position embedding to use for timesteps. Choose from `positional` or `fourier`.
        time_embedding_dim (`int`, *optional*, defaults to `None`):
            An optional override for the dimension of the projected time embedding.
        time_embedding_act_fn (`str`, *optional*, defaults to `None`):
            Optional activation function to use only once on the time embeddings before they are passed to the rest of
            the UNet. Choose from `silu`, `mish`, `gelu`, and `swish`.
        timestep_post_act (`str`, *optional*, defaults to `None`):
            The second activation function to use in timestep embedding. Choose from `silu`, `mish` and `gelu`.
        time_cond_proj_dim (`int`, *optional*, defaults to `None`):
            The dimension of `cond_proj` layer in the timestep embedding.
        conv_in_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_in` layer.
        conv_out_kernel (`int`, *optional*, defaults to `3`): The kernel size of `conv_out` layer.
        projection_class_embeddings_input_dim (`int`, *optional*): The dimension of the `class_labels` input when
            `class_embed_type="projection"`. Required when `class_embed_type="projection"`.
        class_embeddings_concat (`bool`, *optional*, defaults to `False`): Whether to concatenate the time
            embeddings with the class embeddings.
        mid_block_only_cross_attention (`bool`, *optional*, defaults to `None`):
            Whether to use cross attention with the mid block when using the `UNetMidBlock2DSimpleCrossAttn`. If
            `only_cross_attention` is given as a single boolean and `mid_block_only_cross_attention` is `None`, the
            `only_cross_attention` value is used as the value for `mid_block_only_cross_attention`. Defaults to `False`
            otherwise.
    """

    _supports_gradient_checkpointing = True
    _no_split_modules = ["BasicTransformerBlock", "ResnetBlock2D", "CrossAttnUpBlock2D"]

    @register_to_config
    def __init__(
        self,
        sample_size: Optional[int] = None,
        in_channels: int = 4,
        out_channels: int = 4,
        center_input_sample: bool = False,
        flip_sin_to_cos: bool = True,
        freq_shift: int = 0,
        down_block_types: Tuple[str] = (
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "CrossAttnDownBlock2D",
            "DownBlock2D",
        ),
        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
        up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
        only_cross_attention: Union[bool, Tuple[bool]] = False,
        block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
        layers_per_block: Union[int, Tuple[int]] = 2,
        downsample_padding: int = 1,
        mid_block_scale_factor: float = 1,
        dropout: float = 0.0,
        act_fn: str = "silu",
        norm_num_groups: Optional[int] = 32,
        norm_eps: float = 1e-5,
        cross_attention_dim: Union[int, Tuple[int]] = 1280,
        transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
        reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
        encoder_hid_dim: Optional[int] = None,
        encoder_hid_dim_type: Optional[str] = None,
        attention_head_dim: Union[int, Tuple[int]] = 8,
        num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
        dual_cross_attention: bool = False,
        use_linear_projection: bool = False,
        class_embed_type: Optional[str] = None,
        addition_embed_type: Optional[str] = None,
        addition_time_embed_dim: Optional[int] = None,
        num_class_embeds: Optional[int] = None,
        upcast_attention: bool = False,
        resnet_time_scale_shift: str = "default",
        resnet_skip_time_act: bool = False,
        resnet_out_scale_factor: float = 1.0,
        time_embedding_type: str = "positional",
        time_embedding_dim: Optional[int] = None,
        time_embedding_act_fn: Optional[str] = None,
        timestep_post_act: Optional[str] = None,
        time_cond_proj_dim: Optional[int] = None,
        conv_in_kernel: int = 3,
        conv_out_kernel: int = 3,
        projection_class_embeddings_input_dim: Optional[int] = None,
        attention_type: str = "default",
        class_embeddings_concat: bool = False,
        mid_block_only_cross_attention: Optional[bool] = None,
        cross_attention_norm: Optional[str] = None,
        addition_embed_type_num_heads: int = 64,
    ):
        super().__init__()

        self.sample_size = sample_size

        if num_attention_heads is not None:
            raise ValueError(
                "At the moment it is not possible to define the number of attention heads via `num_attention_heads` because of a naming issue as described in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131. Passing `num_attention_heads` will only be supported in diffusers v0.19."
            )

        # If `num_attention_heads` is not defined (which is the case for most models)
        # it will default to `attention_head_dim`. This looks weird upon first reading it and it is.
        # The reason for this behavior is to correct for incorrectly named variables that were introduced
        # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131
        # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking
        # which is why we correct for the naming here.
        num_attention_heads = num_attention_heads or attention_head_dim

        # Check inputs
        self._check_config(
            down_block_types=down_block_types,
            up_block_types=up_block_types,
            only_cross_attention=only_cross_attention,
            block_out_channels=block_out_channels,
            layers_per_block=layers_per_block,
            cross_attention_dim=cross_attention_dim,
            transformer_layers_per_block=transformer_layers_per_block,
            reverse_transformer_layers_per_block=reverse_transformer_layers_per_block,
            attention_head_dim=attention_head_dim,
            num_attention_heads=num_attention_heads,
        )

        # input
        conv_in_padding = (conv_in_kernel - 1) // 2
        self.conv_in = nn.Conv2d(
            in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding
        )

        # time
        time_embed_dim, timestep_input_dim = self._set_time_proj(
            time_embedding_type,
            block_out_channels=block_out_channels,
            flip_sin_to_cos=flip_sin_to_cos,
            freq_shift=freq_shift,
            time_embedding_dim=time_embedding_dim,
        )

        self.time_embedding = TimestepEmbedding(
            timestep_input_dim,
            time_embed_dim,
            act_fn=act_fn,
            post_act_fn=timestep_post_act,
            cond_proj_dim=time_cond_proj_dim,
        )

        self._set_encoder_hid_proj(
            encoder_hid_dim_type,
            cross_attention_dim=cross_attention_dim,
            encoder_hid_dim=encoder_hid_dim,
        )

        # class embedding
        self._set_class_embedding(
            class_embed_type,
            act_fn=act_fn,
            num_class_embeds=num_class_embeds,
            projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
            time_embed_dim=time_embed_dim,
            timestep_input_dim=timestep_input_dim,
        )

        self._set_add_embedding(
            addition_embed_type,
            addition_embed_type_num_heads=addition_embed_type_num_heads,
            addition_time_embed_dim=addition_time_embed_dim,
            cross_attention_dim=cross_attention_dim,
            encoder_hid_dim=encoder_hid_dim,
            flip_sin_to_cos=flip_sin_to_cos,
            freq_shift=freq_shift,
            projection_class_embeddings_input_dim=projection_class_embeddings_input_dim,
            time_embed_dim=time_embed_dim,
        )

        if time_embedding_act_fn is None:
            self.time_embed_act = None
        else:
            self.time_embed_act = get_activation(time_embedding_act_fn)

        self.down_blocks = nn.ModuleList([])
        self.up_blocks = nn.ModuleList([])

        if isinstance(only_cross_attention, bool):
            if mid_block_only_cross_attention is None:
                mid_block_only_cross_attention = only_cross_attention

            only_cross_attention = [only_cross_attention] * len(down_block_types)

        if mid_block_only_cross_attention is None:
            mid_block_only_cross_attention = False

        if isinstance(num_attention_heads, int):
            num_attention_heads = (num_attention_heads,) * len(down_block_types)

        if isinstance(attention_head_dim, int):
            attention_head_dim = (attention_head_dim,) * len(down_block_types)

        if isinstance(cross_attention_dim, int):
            cross_attention_dim = (cross_attention_dim,) * len(down_block_types)

        if isinstance(layers_per_block, int):
            layers_per_block = [layers_per_block] * len(down_block_types)

        if isinstance(transformer_layers_per_block, int):
            transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types)

        if class_embeddings_concat:
            # The time embeddings are concatenated with the class embeddings. The dimension of the
            # time embeddings passed to the down, middle, and up blocks is twice the dimension of the
            # regular time embeddings
            blocks_time_embed_dim = time_embed_dim * 2
        else:
            blocks_time_embed_dim = time_embed_dim

        # down
        output_channel = block_out_channels[0]
        for i, down_block_type in enumerate(down_block_types):
            input_channel = output_channel
            output_channel = block_out_channels[i]
            is_final_block = i == len(block_out_channels) - 1

            down_block = get_down_block(
                down_block_type,
                num_layers=layers_per_block[i],
                transformer_layers_per_block=transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                temb_channels=blocks_time_embed_dim,
                add_downsample=not is_final_block,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resnet_groups=norm_num_groups,
                cross_attention_dim=cross_attention_dim[i],
                num_attention_heads=num_attention_heads[i],
                downsample_padding=downsample_padding,
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                attention_type=attention_type,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
                dropout=dropout,
            )
            self.down_blocks.append(down_block)

        # mid
        self.mid_block = get_mid_block(
            mid_block_type,
            temb_channels=blocks_time_embed_dim,
            in_channels=block_out_channels[-1],
            resnet_eps=norm_eps,
            resnet_act_fn=act_fn,
            resnet_groups=norm_num_groups,
            output_scale_factor=mid_block_scale_factor,
            transformer_layers_per_block=transformer_layers_per_block[-1],
            num_attention_heads=num_attention_heads[-1],
            cross_attention_dim=cross_attention_dim[-1],
            dual_cross_attention=dual_cross_attention,
            use_linear_projection=use_linear_projection,
            mid_block_only_cross_attention=mid_block_only_cross_attention,
            upcast_attention=upcast_attention,
            resnet_time_scale_shift=resnet_time_scale_shift,
            attention_type=attention_type,
            resnet_skip_time_act=resnet_skip_time_act,
            cross_attention_norm=cross_attention_norm,
            attention_head_dim=attention_head_dim[-1],
            dropout=dropout,
        )
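        # ZeroSFT fusion for the mid-block features: the main structural
        # addition relative to the upstream UNet2DConditionModel. Given the
        # forward signature below, it presumably injects
        # `mid_block_additional_residual` (ControlNet-style control features)
        # instead of the plain addition used upstream.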
        self.mid_zero_SFT = ZeroSFT(block_out_channels[-1], block_out_channels[-1], 0)

        # count how many layers upsample the images
        self.num_upsamplers = 0

        # up
        reversed_block_out_channels = list(reversed(block_out_channels))
        reversed_num_attention_heads = list(reversed(num_attention_heads))
        reversed_layers_per_block = list(reversed(layers_per_block))
        reversed_cross_attention_dim = list(reversed(cross_attention_dim))
        reversed_transformer_layers_per_block = (
            list(reversed(transformer_layers_per_block))
            if reverse_transformer_layers_per_block is None
            else reverse_transformer_layers_per_block
        )
        only_cross_attention = list(reversed(only_cross_attention))

        output_channel = reversed_block_out_channels[0]
        for i, up_block_type in enumerate(up_block_types):
            is_final_block = i == len(block_out_channels) - 1

            prev_output_channel = output_channel
            output_channel = reversed_block_out_channels[i]
            input_channel = reversed_block_out_channels[min(i + 1, len(block_out_channels) - 1)]

            # add upsample block for all BUT final layer
            if not is_final_block:
                add_upsample = True
                self.num_upsamplers += 1
            else:
                add_upsample = False

            up_block = get_up_block(
                up_block_type,
                num_layers=reversed_layers_per_block[i] + 1,
                transformer_layers_per_block=reversed_transformer_layers_per_block[i],
                in_channels=input_channel,
                out_channels=output_channel,
                prev_output_channel=prev_output_channel,
                temb_channels=blocks_time_embed_dim,
                add_upsample=add_upsample,
                resnet_eps=norm_eps,
                resnet_act_fn=act_fn,
                resolution_idx=i,
                resnet_groups=norm_num_groups,
                cross_attention_dim=reversed_cross_attention_dim[i],
                num_attention_heads=reversed_num_attention_heads[i],
                dual_cross_attention=dual_cross_attention,
                use_linear_projection=use_linear_projection,
                only_cross_attention=only_cross_attention[i],
                upcast_attention=upcast_attention,
                resnet_time_scale_shift=resnet_time_scale_shift,
                attention_type=attention_type,
                resnet_skip_time_act=resnet_skip_time_act,
                resnet_out_scale_factor=resnet_out_scale_factor,
                cross_attention_norm=cross_attention_norm,
                attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel,
                dropout=dropout,
            )
            self.up_blocks.append(up_block)
            prev_output_channel = output_channel

        # out
        if norm_num_groups is not None:
            self.conv_norm_out = nn.GroupNorm(
                num_channels=block_out_channels[0], num_groups=norm_num_groups, eps=norm_eps
            )

            self.conv_act = get_activation(act_fn)

        else:
            self.conv_norm_out = None
            self.conv_act = None

        conv_out_padding = (conv_out_kernel - 1) // 2
        self.conv_out = nn.Conv2d(
            block_out_channels[0], out_channels, kernel_size=conv_out_kernel, padding=conv_out_padding
        )

        self._set_pos_net_if_use_gligen(attention_type=attention_type, cross_attention_dim=cross_attention_dim)

    def _check_config(
        self,
        down_block_types: Tuple[str],
        up_block_types: Tuple[str],
        only_cross_attention: Union[bool, Tuple[bool]],
        block_out_channels: Tuple[int],
        layers_per_block: Union[int, Tuple[int]],
        cross_attention_dim: Union[int, Tuple[int]],
        transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple[int]]],
        reverse_transformer_layers_per_block: bool,
        attention_head_dim: int,
        num_attention_heads: Optional[Union[int, Tuple[int]]],
    ):
        if len(down_block_types) != len(up_block_types):
            raise ValueError(
                f"Must provide the same number of `down_block_types` as `up_block_types`. `down_block_types`: {down_block_types}. `up_block_types`: {up_block_types}."
            )

        if len(block_out_channels) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(attention_head_dim, int) and len(attention_head_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `attention_head_dim` as `down_block_types`. `attention_head_dim`: {attention_head_dim}. `down_block_types`: {down_block_types}."
            )

        if isinstance(cross_attention_dim, list) and len(cross_attention_dim) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `cross_attention_dim` as `down_block_types`. `cross_attention_dim`: {cross_attention_dim}. `down_block_types`: {down_block_types}."
            )

        if not isinstance(layers_per_block, int) and len(layers_per_block) != len(down_block_types):
            raise ValueError(
                f"Must provide the same number of `layers_per_block` as `down_block_types`. `layers_per_block`: {layers_per_block}. `down_block_types`: {down_block_types}."
            )
        if isinstance(transformer_layers_per_block, list) and reverse_transformer_layers_per_block is None:
            for layer_number_per_block in transformer_layers_per_block:
                if isinstance(layer_number_per_block, list):
                    raise ValueError("Must provide `reverse_transformer_layers_per_block` if using asymmetrical UNet.")

    def _set_time_proj(
        self,
        time_embedding_type: str,
        block_out_channels: int,
        flip_sin_to_cos: bool,
        freq_shift: float,
        time_embedding_dim: int,
    ) -> Tuple[int, int]:
        if time_embedding_type == "fourier":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 2
            if time_embed_dim % 2 != 0:
                raise ValueError(f"`time_embed_dim` should be divisible by 2, but is {time_embed_dim}.")
            self.time_proj = GaussianFourierProjection(
                time_embed_dim // 2, set_W_to_weight=False, log=False, flip_sin_to_cos=flip_sin_to_cos
            )
            timestep_input_dim = time_embed_dim
        elif time_embedding_type == "positional":
            time_embed_dim = time_embedding_dim or block_out_channels[0] * 4

            self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift)
            timestep_input_dim = block_out_channels[0]
        else:
            raise ValueError(
                f"{time_embedding_type} does not exist. Please make sure to use one of `fourier` or `positional`."
            )

        return time_embed_dim, timestep_input_dim

    def _set_encoder_hid_proj(
        self,
        encoder_hid_dim_type: Optional[str],
        cross_attention_dim: Union[int, Tuple[int]],
        encoder_hid_dim: Optional[int],
    ):
        if encoder_hid_dim_type is None and encoder_hid_dim is not None:
            encoder_hid_dim_type = "text_proj"
            self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type)
            logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.")

        if encoder_hid_dim is None and encoder_hid_dim_type is not None:
            raise ValueError(
                f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}."
            )

        if encoder_hid_dim_type == "text_proj":
            self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim)
        elif encoder_hid_dim_type == "text_image_proj":
            # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much
            # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image_proj"` (Kandinsky 2.1)
            self.encoder_hid_proj = TextImageProjection(
                text_embed_dim=encoder_hid_dim,
                image_embed_dim=cross_attention_dim,
                cross_attention_dim=cross_attention_dim,
            )
        elif encoder_hid_dim_type == "image_proj":
            # Kandinsky 2.2
            self.encoder_hid_proj = ImageProjection(
                image_embed_dim=encoder_hid_dim,
                cross_attention_dim=cross_attention_dim,
            )
        elif encoder_hid_dim_type is not None:
            raise ValueError(
                f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj', 'text_image_proj', or 'image_proj'."
            )
        else:
            self.encoder_hid_proj = None

    def _set_class_embedding(
        self,
        class_embed_type: Optional[str],
        act_fn: str,
        num_class_embeds: Optional[int],
        projection_class_embeddings_input_dim: Optional[int],
        time_embed_dim: int,
        timestep_input_dim: int,
    ):
        if class_embed_type is None and num_class_embeds is not None:
            self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim)
        elif class_embed_type == "timestep":
            self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim, act_fn=act_fn)
        elif class_embed_type == "identity":
            self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim)
        elif class_embed_type == "projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set"
                )
            # The projection `class_embed_type` is the same as the timestep `class_embed_type` except
            # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings
            # 2. it projects from an arbitrary input dimension.
            #
            # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations.
            # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings.
            # As a result, `TimestepEmbedding` can be passed arbitrary vectors.
            self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif class_embed_type == "simple_projection":
            if projection_class_embeddings_input_dim is None:
                raise ValueError(
                    "`class_embed_type`: 'simple_projection' requires `projection_class_embeddings_input_dim` be set"
                )
            self.class_embedding = nn.Linear(projection_class_embeddings_input_dim, time_embed_dim)
        else:
            self.class_embedding = None

    def _set_add_embedding(
        self,
        addition_embed_type: str,
        addition_embed_type_num_heads: int,
        addition_time_embed_dim: Optional[int],
        flip_sin_to_cos: bool,
        freq_shift: float,
        cross_attention_dim: Optional[int],
        encoder_hid_dim: Optional[int],
        projection_class_embeddings_input_dim: Optional[int],
        time_embed_dim: int,
    ):
        if addition_embed_type == "text":
            if encoder_hid_dim is not None:
                text_time_embedding_from_dim = encoder_hid_dim
            else:
                text_time_embedding_from_dim = cross_attention_dim

            self.add_embedding = TextTimeEmbedding(
                text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads
            )
        elif addition_embed_type == "text_image":
            # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much
            # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use
            # case when `addition_embed_type == "text_image"` (Kandinsky 2.1)
            self.add_embedding = TextImageTimeEmbedding(
                text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim
            )
        elif addition_embed_type == "text_time":
            self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift)
            self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim)
        elif addition_embed_type == "image":
            # Kandinsky 2.2
            self.add_embedding = ImageTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
        elif addition_embed_type == "image_hint":
            # Kandinsky 2.2 ControlNet
            self.add_embedding = ImageHintTimeEmbedding(image_embed_dim=encoder_hid_dim, time_embed_dim=time_embed_dim)
        elif addition_embed_type is not None:
            raise ValueError(
                f"addition_embed_type: {addition_embed_type} must be None, 'text', 'text_image', 'text_time', 'image', or 'image_hint'."
            )

    def _set_pos_net_if_use_gligen(self, attention_type: str, cross_attention_dim: int):
        if attention_type in ["gated", "gated-text-image"]:
            positive_len = 768
            if isinstance(cross_attention_dim, int):
                positive_len = cross_attention_dim
            elif isinstance(cross_attention_dim, (tuple, list)):
                positive_len = cross_attention_dim[0]

            feature_type = "text-only" if attention_type == "gated" else "text-image"
            self.position_net = GLIGENTextBoundingboxProjection(
                positive_len=positive_len, out_dim=cross_attention_dim, feature_type=feature_type
            )

    @property
    def attn_processors(self) -> Dict[str, AttentionProcessor]:
        r"""
        Returns:
            `dict` of attention processors: A dictionary containing all attention processors used in the model,
            indexed by their weight names.
        """
        # set recursively
        processors = {}

        def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]):
            if hasattr(module, "get_processor"):
                processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True)

            for sub_name, child in module.named_children():
                fn_recursive_add_processors(f"{name}.{sub_name}", child, processors)

            return processors

        for name, module in self.named_children():
            fn_recursive_add_processors(name, module, processors)

        return processors

    def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
        r"""
        Sets the attention processor to use to compute attention.

        Parameters:
            processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
                The instantiated processor class or a dictionary of processor classes that will be set as the processor
                for **all** `Attention` layers.

                If `processor` is a dict, the key needs to define the path to the corresponding cross attention
                processor. This is strongly recommended when setting trainable attention processors.
        """
        count = len(self.attn_processors.keys())

        if isinstance(processor, dict) and len(processor) != count:
            raise ValueError(
                f"A dict of processors was passed, but the number of processors {len(processor)} does not match the"
                f" number of attention layers: {count}. Please make sure to pass {count} processor classes."
            )

        def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
            if hasattr(module, "set_processor"):
                if not isinstance(processor, dict):
                    module.set_processor(processor)
                else:
                    module.set_processor(processor.pop(f"{name}.processor"))

            for sub_name, child in module.named_children():
                fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

        for name, module in self.named_children():
            fn_recursive_attn_processor(name, module, processor)

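    # Usage sketch for `set_attn_processor` above (inherited diffusers API):
    # pass one processor for every attention layer, e.g.
    # `model.set_attn_processor(AttnProcessor())`, or a dict keyed like
    # `model.attn_processors` for per-layer control.
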
    def set_default_attn_processor(self):
        """
        Disables custom attention processors and sets the default attention implementation.
        """
        if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnAddedKVProcessor()
        elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()):
            processor = AttnProcessor()
        else:
            raise ValueError(
                f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}"
            )

        self.set_attn_processor(processor)

    def set_attention_slice(self, slice_size: Union[str, int, List[int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module splits the input tensor in slices to compute attention in
        several steps. This is useful for saving some memory in exchange for a small decrease in speed.

        Args:
            slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
                When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
                `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
                provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
                must be a multiple of `slice_size`.
        """
        sliceable_head_dims = []

        def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module):
            if hasattr(module, "set_attention_slice"):
                sliceable_head_dims.append(module.sliceable_head_dim)

            for child in module.children():
                fn_recursive_retrieve_sliceable_dims(child)

        # retrieve number of attention layers
        for module in self.children():
            fn_recursive_retrieve_sliceable_dims(module)

        num_sliceable_layers = len(sliceable_head_dims)

        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between
            # speed and memory
            slice_size = [dim // 2 for dim in sliceable_head_dims]
        elif slice_size == "max":
            # make smallest slice possible
            slice_size = num_sliceable_layers * [1]

        slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size

        if len(slice_size) != len(sliceable_head_dims):
            raise ValueError(
                f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different"
                f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}."
            )

        for i in range(len(slice_size)):
            size = slice_size[i]
            dim = sliceable_head_dims[i]
            if size is not None and size > dim:
                raise ValueError(f"size {size} has to be smaller or equal to {dim}.")

        # Recursively walk through all the children.
        # Any child that exposes the set_attention_slice method
        # gets the message
        def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]):
            if hasattr(module, "set_attention_slice"):
                module.set_attention_slice(slice_size.pop())

            for child in module.children():
                fn_recursive_set_attention_slice(child, slice_size)

        reversed_slice_size = list(reversed(slice_size))
        for module in self.children():
            fn_recursive_set_attention_slice(module, reversed_slice_size)

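    # Usage sketch for `set_attention_slice` above:
    # `model.set_attention_slice("auto")` computes attention in two steps per
    # layer; `"max"` runs one slice at a time for the lowest memory footprint,
    # and an int or list selects explicit slice sizes.
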
    def _set_gradient_checkpointing(self, module, value=False):
        if hasattr(module, "gradient_checkpointing"):
            module.gradient_checkpointing = value

    def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
        r"""Enables the FreeU mechanism from https://arxiv.org/abs/2309.11497.

        The suffixes after the scaling factors represent the stage blocks where they are being applied.

        Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of values that
        are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.

        Args:
            s1 (`float`):
                Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            s2 (`float`):
                Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
            b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
        """
        for i, upsample_block in enumerate(self.up_blocks):
            setattr(upsample_block, "s1", s1)
            setattr(upsample_block, "s2", s2)
            setattr(upsample_block, "b1", b1)
            setattr(upsample_block, "b2", b2)

    def disable_freeu(self):
        """Disables the FreeU mechanism."""
        freeu_keys = {"s1", "s2", "b1", "b2"}
        for i, upsample_block in enumerate(self.up_blocks):
            for k in freeu_keys:
                if hasattr(upsample_block, k) or getattr(upsample_block, k, None) is not None:
                    setattr(upsample_block, k, None)

    def fuse_qkv_projections(self):
        """
        Enables fused QKV projections. For self-attention modules, all projection matrices (i.e., query, key, value)
        are fused. For cross-attention modules, key and value projection matrices are fused.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        self.original_attn_processors = None

        for _, attn_processor in self.attn_processors.items():
            if "Added" in str(attn_processor.__class__.__name__):
                raise ValueError("`fuse_qkv_projections()` is not supported for models having added KV projections.")

        self.original_attn_processors = self.attn_processors

        for module in self.modules():
            if isinstance(module, Attention):
                module.fuse_projections(fuse=True)

    def unfuse_qkv_projections(self):
        """Disables the fused QKV projection if enabled.

        <Tip warning={true}>

        This API is 🧪 experimental.

        </Tip>
        """
        if self.original_attn_processors is not None:
            self.set_attn_processor(self.original_attn_processors)

    def unload_lora(self):
        """Unloads LoRA weights."""
        deprecate(
            "unload_lora",
            "0.28.0",
            "Calling `unload_lora()` is deprecated and will be removed in a future version. Please install `peft` and then call `disable_adapters()`.",
        )
        for module in self.modules():
            if hasattr(module, "set_lora_layer"):
                module.set_lora_layer(None)

    def get_time_embed(
        self, sample: torch.Tensor, timestep: Union[torch.Tensor, float, int]
    ) -> Optional[torch.Tensor]:
        timesteps = timestep
        if not torch.is_tensor(timesteps):
            # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
            # This would be a good case for the `match` statement (Python 3.10+)
            is_mps = sample.device.type == "mps"
            if isinstance(timestep, float):
                dtype = torch.float32 if is_mps else torch.float64
            else:
                dtype = torch.int32 if is_mps else torch.int64
            timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
        elif len(timesteps.shape) == 0:
            timesteps = timesteps[None].to(sample.device)

        # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
        timesteps = timesteps.expand(sample.shape[0])

        t_emb = self.time_proj(timesteps)
        # `Timesteps` does not contain any weights and will always return f32 tensors
        # but time_embedding might actually be running in fp16. so we need to cast here.
        # there might be better ways to encapsulate this.
        t_emb = t_emb.to(dtype=sample.dtype)
        return t_emb

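    # Note on `get_time_embed` above: a scalar timestep such as `999` is
    # broadcast here to a `(batch,)` tensor, then (for the default
    # "positional" embedding) projected to sinusoidal features of width
    # `block_out_channels[0]` and cast back to the sample dtype.
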
1006
+ def get_class_embed(self, sample: torch.Tensor, class_labels: Optional[torch.Tensor]) -> Optional[torch.Tensor]:
1007
+ class_emb = None
1008
+ if self.class_embedding is not None:
1009
+ if class_labels is None:
1010
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
1011
+
1012
+ if self.config.class_embed_type == "timestep":
1013
+ class_labels = self.time_proj(class_labels)
1014
+
1015
+ # `Timesteps` does not contain any weights and will always return f32 tensors
1016
+ # there might be better ways to encapsulate this.
1017
+ class_labels = class_labels.to(dtype=sample.dtype)
1018
+
1019
+ class_emb = self.class_embedding(class_labels).to(dtype=sample.dtype)
1020
+ return class_emb
1021
+
1022
+ def get_aug_embed(
1023
+ self, emb: torch.Tensor, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
1024
+ ) -> Optional[torch.Tensor]:
1025
+ aug_emb = None
1026
+ if self.config.addition_embed_type == "text":
1027
+ aug_emb = self.add_embedding(encoder_hidden_states)
1028
+ elif self.config.addition_embed_type == "text_image":
1029
+ # Kandinsky 2.1 - style
1030
+ if "image_embeds" not in added_cond_kwargs:
1031
+ raise ValueError(
1032
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1033
+ )
1034
+
1035
+ image_embs = added_cond_kwargs.get("image_embeds")
1036
+ text_embs = added_cond_kwargs.get("text_embeds", encoder_hidden_states)
1037
+ aug_emb = self.add_embedding(text_embs, image_embs)
1038
+ elif self.config.addition_embed_type == "text_time":
1039
+ # SDXL - style
1040
+ if "text_embeds" not in added_cond_kwargs:
1041
+ raise ValueError(
1042
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
1043
+ )
1044
+ text_embeds = added_cond_kwargs.get("text_embeds")
1045
+ if "time_ids" not in added_cond_kwargs:
1046
+ raise ValueError(
1047
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
1048
+ )
1049
+ time_ids = added_cond_kwargs.get("time_ids")
1050
+ time_embeds = self.add_time_proj(time_ids.flatten())
1051
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
1052
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
1053
+ add_embeds = add_embeds.to(emb.dtype)
1054
+ aug_emb = self.add_embedding(add_embeds)
1055
+ elif self.config.addition_embed_type == "image":
1056
+ # Kandinsky 2.2 - style
1057
+ if "image_embeds" not in added_cond_kwargs:
1058
+ raise ValueError(
1059
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image' which requires the keyword argument `image_embeds` to be passed in `added_cond_kwargs`"
1060
+ )
1061
+ image_embs = added_cond_kwargs.get("image_embeds")
1062
+ aug_emb = self.add_embedding(image_embs)
1063
+ elif self.config.addition_embed_type == "image_hint":
1064
+ # Kandinsky 2.2 - style
1065
+ if "image_embeds" not in added_cond_kwargs or "hint" not in added_cond_kwargs:
1066
+ raise ValueError(
1067
+ f"{self.__class__} has the config param `addition_embed_type` set to 'image_hint' which requires the keyword arguments `image_embeds` and `hint` to be passed in `added_cond_kwargs`"
1068
+ )
1069
+ image_embs = added_cond_kwargs.get("image_embeds")
1070
+ hint = added_cond_kwargs.get("hint")
1071
+ aug_emb = self.add_embedding(image_embs, hint)
1072
+ return aug_emb
1073
+
1074
+ def process_encoder_hidden_states(
1075
+ self, encoder_hidden_states: torch.Tensor, added_cond_kwargs: Dict[str, Any]
1076
+ ) -> torch.Tensor:
1077
+ if self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_proj":
1078
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states)
1079
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "text_image_proj":
1080
+ # Kandinsky 2.1 - style
1081
+ if "image_embeds" not in added_cond_kwargs:
1082
+ raise ValueError(
1083
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'text_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1084
+ )
1085
+
1086
+ image_embeds = added_cond_kwargs.get("image_embeds")
1087
+ encoder_hidden_states = self.encoder_hid_proj(encoder_hidden_states, image_embeds)
1088
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "image_proj":
1089
+ # Kandinsky 2.2 - style
1090
+ if "image_embeds" not in added_cond_kwargs:
1091
+ raise ValueError(
1092
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1093
+ )
1094
+ image_embeds = added_cond_kwargs.get("image_embeds")
1095
+ encoder_hidden_states = self.encoder_hid_proj(image_embeds)
1096
+ elif self.encoder_hid_proj is not None and self.config.encoder_hid_dim_type == "ip_image_proj":
1097
+ if "image_embeds" not in added_cond_kwargs:
1098
+ raise ValueError(
1099
+ f"{self.__class__} has the config param `encoder_hid_dim_type` set to 'ip_image_proj' which requires the keyword argument `image_embeds` to be passed in `added_conditions`"
1100
+ )
1101
+ image_embeds = added_cond_kwargs.get("image_embeds")
1102
+ image_embeds = self.encoder_hid_proj(image_embeds)
1103
+ encoder_hidden_states = (encoder_hidden_states, image_embeds)
1104
+ return encoder_hidden_states
1105
+
1106
+ def forward(
1107
+ self,
1108
+ sample: torch.FloatTensor,
1109
+ timestep: Union[torch.Tensor, float, int],
1110
+ encoder_hidden_states: torch.Tensor,
1111
+ class_labels: Optional[torch.Tensor] = None,
1112
+ timestep_cond: Optional[torch.Tensor] = None,
1113
+ attention_mask: Optional[torch.Tensor] = None,
1114
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1115
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
1116
+ down_block_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
1117
+ mid_block_additional_residual: Optional[torch.Tensor] = None,
1118
+ down_intrablock_additional_residuals: Optional[Tuple[torch.Tensor]] = None,
1119
+ encoder_attention_mask: Optional[torch.Tensor] = None,
1120
+ return_dict: bool = True,
1121
+ ) -> Union[UNet2DConditionOutput, Tuple]:
1122
+ r"""
1123
+ The [`UNet2DConditionModel`] forward method.
1124
+ Args:
1125
+ sample (`torch.FloatTensor`):
1126
+ The noisy input tensor with the following shape `(batch, channel, height, width)`.
1127
+ timestep (`torch.FloatTensor` or `float` or `int`): The number of timesteps to denoise an input.
1128
+ encoder_hidden_states (`torch.FloatTensor`):
1129
+ The encoder hidden states with shape `(batch, sequence_length, feature_dim)`.
1130
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
1131
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
1132
+ timestep_cond: (`torch.Tensor`, *optional*, defaults to `None`):
1133
+ Conditional embeddings for timestep. If provided, the embeddings will be summed with the samples passed
1134
+ through the `self.time_embedding` layer to obtain the timestep embeddings.
1135
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
1136
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
1137
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
1138
+ negative values to the attention scores corresponding to "discard" tokens.
1139
+ cross_attention_kwargs (`dict`, *optional*):
1140
+ A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
1141
+ `self.processor` in
1142
+ [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
1143
+ added_cond_kwargs: (`dict`, *optional*):
1144
+ A kwargs dictionary containing additional embeddings that if specified are added to the embeddings that
1145
+ are passed along to the UNet blocks.
1146
+ down_block_additional_residuals: (`tuple` of `torch.Tensor`, *optional*):
1147
+ A tuple of tensors that if specified are added to the residuals of down unet blocks.
1148
+ mid_block_additional_residual: (`torch.Tensor`, *optional*):
1149
+ A tensor that if specified is added to the residual of the middle unet block.
1150
+ down_intrablock_additional_residuals (`tuple` of `torch.Tensor`, *optional*):
1151
+ additional residuals to be added within UNet down blocks, for example from T2I-Adapter side model(s)
1152
+ encoder_attention_mask (`torch.Tensor`):
1153
+ A cross-attention mask of shape `(batch, sequence_length)` is applied to `encoder_hidden_states`. If
1154
+ `True` the mask is kept, otherwise if `False` it is discarded. Mask will be converted into a bias,
1155
+ which adds large negative values to the attention scores corresponding to "discard" tokens.
1156
+ return_dict (`bool`, *optional*, defaults to `True`):
1157
+ Whether or not to return a [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] instead of a plain
1158
+ tuple.
1159
+ Returns:
1160
+ [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] or `tuple`:
1161
+ If `return_dict` is True, an [`~models.unets.unet_2d_condition.UNet2DConditionOutput`] is returned,
1162
+ otherwise a `tuple` is returned where the first element is the sample tensor.
1163
+ """
1164
+         # By default samples have to be at least a multiple of the overall upsampling factor.
+         # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
+         # However, the upsampling interpolation output size can be forced to fit any upsampling size
+         # on the fly if necessary.
+         default_overall_up_factor = 2**self.num_upsamplers
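+         # e.g. with 3 upsampling layers the factor is 2**3 == 8, so 64x64 latents
+         # need no special handling but a 66x66 input would (see the loop below)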
+
+         # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
+         forward_upsample_size = False
+         upsample_size = None
+
+         for dim in sample.shape[-2:]:
+             if dim % default_overall_up_factor != 0:
+                 # Forward upsample size to force interpolation output size.
+                 forward_upsample_size = True
+                 break
+
+         # ensure attention_mask is a bias, and give it a singleton query_tokens dimension
+         # expects mask of shape:
+         #   [batch, key_tokens]
+         # adds singleton query_tokens dimension:
+         #   [batch,                    1, key_tokens]
+         # this helps to broadcast it as a bias over attention scores, which will be in one of the following shapes:
+         #   [batch,  heads, query_tokens, key_tokens] (e.g. torch sdp attn)
+         #   [batch * heads, query_tokens, key_tokens] (e.g. xformers or classic attn)
+         if attention_mask is not None:
+             # assume that mask is expressed as:
+             #   (1 = keep,      0 = discard)
+             # convert mask into a bias that can be added to attention scores:
+             #   (keep = +0,     discard = -10000.0)
+             attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
+             attention_mask = attention_mask.unsqueeze(1)
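+             # e.g. a mask [[1, 1, 0]] (keep, keep, discard) becomes the bias
+             # [[[0.0, 0.0, -10000.0]]]: shape [batch, key_tokens] -> [batch, 1, key_tokens]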
+
+         # convert encoder_attention_mask to a bias the same way we do for attention_mask
+         if encoder_attention_mask is not None:
+             encoder_attention_mask = (1 - encoder_attention_mask.to(sample.dtype)) * -10000.0
+             encoder_attention_mask = encoder_attention_mask.unsqueeze(1)
+
+         # 0. center input if necessary
+         if self.config.center_input_sample:
+             sample = 2 * sample - 1.0
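+             # rescales inputs from [0, 1] to [-1, 1] (only meaningful when inputs
+             # arrive in [0, 1])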
+
+         # 1. time
+         t_emb = self.get_time_embed(sample=sample, timestep=timestep)
+         emb = self.time_embedding(t_emb, timestep_cond)
+         aug_emb = None
+
+         class_emb = self.get_class_embed(sample=sample, class_labels=class_labels)
+         if class_emb is not None:
+             if self.config.class_embeddings_concat:
+                 emb = torch.cat([emb, class_emb], dim=-1)
+             else:
+                 emb = emb + class_emb
+
+         aug_emb = self.get_aug_embed(
+             emb=emb, encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+         )
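+         # "image_hint"-type additional embeddings (e.g. the ControlNet-style variant
+         # of Kandinsky 2.2) also return a spatial hint map that is concatenated to
+         # the input latents as extra channels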
+         if self.config.addition_embed_type == "image_hint":
+             aug_emb, hint = aug_emb
+             sample = torch.cat([sample, hint], dim=1)
+
+         emb = emb + aug_emb if aug_emb is not None else emb
+
+         if self.time_embed_act is not None:
+             emb = self.time_embed_act(emb)
+
+         encoder_hidden_states = self.process_encoder_hidden_states(
+             encoder_hidden_states=encoder_hidden_states, added_cond_kwargs=added_cond_kwargs
+         )
+
+         # 2. pre-process
+         sample = self.conv_in(sample)
+
+         # 2.5 GLIGEN position net
+         if cross_attention_kwargs is not None and cross_attention_kwargs.get("gligen", None) is not None:
+             cross_attention_kwargs = cross_attention_kwargs.copy()
+             gligen_args = cross_attention_kwargs.pop("gligen")
+             cross_attention_kwargs["gligen"] = {"objs": self.position_net(**gligen_args)}
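+             # the position net turns GLIGEN grounding inputs (boxes plus phrase
+             # embeddings) into grounding tokens consumed by the attention processors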
+
+         # 3. down
+         # we're popping the `scale` instead of getting it because otherwise `scale` will be propagated
+         # to the internal blocks and will raise deprecation warnings. this will be confusing for our users.
+         if cross_attention_kwargs is not None:
+             cross_attention_kwargs = cross_attention_kwargs.copy()
+             lora_scale = cross_attention_kwargs.pop("scale", 1.0)
+         else:
+             lora_scale = 1.0
+
+         if USE_PEFT_BACKEND:
+             # weight the lora layers by setting `lora_scale` for each PEFT layer
+             scale_lora_layers(self, lora_scale)
+
+         is_controlnet = mid_block_additional_residual is not None and down_block_additional_residuals is not None
+         # using new arg down_intrablock_additional_residuals for T2I-Adapters, to distinguish from controlnets
+         is_adapter = down_intrablock_additional_residuals is not None
+         # maintain backward compatibility for legacy usage, where
+         # T2I-Adapter and ControlNet both use down_block_additional_residuals arg
+         # but can only use one or the other
+         if not is_adapter and mid_block_additional_residual is None and down_block_additional_residuals is not None:
+             deprecate(
+                 "T2I should not use down_block_additional_residuals",
+                 "1.3.0",
+                 "Passing intrablock residual connections with `down_block_additional_residuals` is deprecated \
+                        and will be removed in diffusers 1.3.0. `down_block_additional_residuals` should only be used \
+                        for ControlNet. Please make sure to use `down_intrablock_additional_residuals` instead. ",
+                 standard_warn=False,
+             )
+             down_intrablock_additional_residuals = down_block_additional_residuals
+             is_adapter = True
+
+         down_block_res_samples = (sample,)
+         for downsample_block in self.down_blocks:
+             if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention:
+                 # For t2i-adapter CrossAttnDownBlock2D
+                 additional_residuals = {}
+                 if is_adapter and len(down_intrablock_additional_residuals) > 0:
+                     additional_residuals["additional_residuals"] = down_intrablock_additional_residuals.pop(0)
+
+                 sample, res_samples = downsample_block(
+                     hidden_states=sample,
+                     temb=emb,
+                     encoder_hidden_states=encoder_hidden_states,
+                     attention_mask=attention_mask,
+                     cross_attention_kwargs=cross_attention_kwargs,
+                     encoder_attention_mask=encoder_attention_mask,
+                     **additional_residuals,
+                 )
+             else:
+                 sample, res_samples = downsample_block(hidden_states=sample, temb=emb)
+                 if is_adapter and len(down_intrablock_additional_residuals) > 0:
+                     sample += down_intrablock_additional_residuals.pop(0)
+
+             down_block_res_samples += res_samples
+
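+         # ZeroSFT deviation from the stock UNet: instead of adding each ControlNet
+         # residual to its matching skip connection here, keep the two tensors paired
+         # so they can be fused by the ZeroSFT layers inside the modified up blocks
+         # (the mid block gets the same treatment via `mid_zero_SFT` below)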
+         if is_controlnet:
+             new_down_block_res_samples = ()
+
+             for down_block_additional_residual, down_block_res_sample in zip(
+                 down_block_additional_residuals, down_block_res_samples
+             ):
+                 down_block_res_sample_tuple = (down_block_additional_residual, down_block_res_sample)
+                 new_down_block_res_samples = new_down_block_res_samples + (down_block_res_sample_tuple,)
+
+             down_block_res_samples = new_down_block_res_samples
+
+         # 4. mid
+         if self.mid_block is not None:
+             if hasattr(self.mid_block, "has_cross_attention") and self.mid_block.has_cross_attention:
+                 sample = self.mid_block(
+                     sample,
+                     emb,
+                     encoder_hidden_states=encoder_hidden_states,
+                     attention_mask=attention_mask,
+                     cross_attention_kwargs=cross_attention_kwargs,
+                     encoder_attention_mask=encoder_attention_mask,
+                 )
+             else:
+                 sample = self.mid_block(sample, emb)
+
+             # To support T2I-Adapter-XL
+             if (
+                 is_adapter
+                 and len(down_intrablock_additional_residuals) > 0
+                 and sample.shape == down_intrablock_additional_residuals[0].shape
+             ):
+                 sample += down_intrablock_additional_residuals.pop(0)
+
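+         # ZeroSFT deviation: the ControlNet mid-block residual is fused through the
+         # `mid_zero_SFT` module rather than the stock
+         # `sample = sample + mid_block_additional_residual`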
+         if is_controlnet:
+             sample = self.mid_zero_SFT((mid_block_additional_residual, sample))
+
+         # 5. up
+         for i, upsample_block in enumerate(self.up_blocks):
+             is_final_block = i == len(self.up_blocks) - 1
+
+             res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
+             down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]
+
+             # if we have not reached the final block and need to forward the
+             # upsample size, we do it here
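+             # (the next skip connection's spatial size is the exact resolution this
+             # upsampler must produce; note that under `is_controlnet` each entry is a
+             # (residual, sample) pair, which would need unpacking for odd-sized inputs)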
+             if not is_final_block and forward_upsample_size:
+                 upsample_size = down_block_res_samples[-1].shape[2:]
+
+             if hasattr(upsample_block, "has_cross_attention") and upsample_block.has_cross_attention:
+                 sample = upsample_block(
+                     hidden_states=sample,
+                     temb=emb,
+                     res_hidden_states_tuple=res_samples,
+                     encoder_hidden_states=encoder_hidden_states,
+                     cross_attention_kwargs=cross_attention_kwargs,
+                     upsample_size=upsample_size,
+                     attention_mask=attention_mask,
+                     encoder_attention_mask=encoder_attention_mask,
+                 )
+             else:
+                 sample = upsample_block(
+                     hidden_states=sample,
+                     temb=emb,
+                     res_hidden_states_tuple=res_samples,
+                     upsample_size=upsample_size,
+                 )
+
+         # 6. post-process
+         if self.conv_norm_out:
+             sample = self.conv_norm_out(sample)
+             sample = self.conv_act(sample)
+         sample = self.conv_out(sample)
+
+         if USE_PEFT_BACKEND:
+             # remove `lora_scale` from each PEFT layer
+             unscale_lora_layers(self, lora_scale)
+
+         if not return_dict:
+             return (sample,)
+
+         return UNet2DConditionOutput(sample=sample)
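+
+
+ # Illustrative usage sketch: assuming an instance `unet` of this model has been
+ # constructed or loaded elsewhere, one denoising step looks like
+ #
+ #     noise_pred = unet(
+ #         sample=torch.randn(1, 4, 64, 64),               # noisy latents (example shape)
+ #         timestep=999,                                   # diffusion timestep
+ #         encoder_hidden_states=torch.randn(1, 77, 768),  # text-encoder states (example shape)
+ #     ).sample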