from typing import Any, Dict, Optional

import torch
from diffusers.models.attention import (
    AdaLayerNorm,
    AdaLayerNormZero,
    Attention,
    FeedForward,
    GatedSelfAttentionDense,
)
from diffusers.models.embeddings import SinusoidalPositionalEmbedding
from einops import rearrange
from torch import nn


class BasicTransformerBlock(nn.Module):
    r"""
    A basic Transformer block.

    Parameters:
        dim (`int`): The number of channels in the input and output.
        num_attention_heads (`int`): The number of heads to use for multi-head attention.
        attention_head_dim (`int`): The number of channels in each head.
        dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use.
        cross_attention_dim (`int`, *optional*): The size of the encoder_hidden_states vector for cross attention.
        activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward.
        num_embeds_ada_norm (`int`, *optional*):
            The number of diffusion steps used during training. See `Transformer2DModel`.
        attention_bias (`bool`, *optional*, defaults to `False`):
            Configure if the attentions should contain a bias parameter.
        only_cross_attention (`bool`, *optional*):
            Whether to use only cross-attention layers. In this case two cross attention layers are used.
        double_self_attention (`bool`, *optional*):
            Whether to use two self-attention layers. In this case no cross attention layers are used.
        upcast_attention (`bool`, *optional*):
            Whether to upcast the attention computation to float32. This is useful for mixed precision training.
        norm_elementwise_affine (`bool`, *optional*, defaults to `True`):
            Whether to use learnable elementwise affine parameters for normalization.
        norm_type (`str`, *optional*, defaults to `"layer_norm"`):
            The normalization layer to use. Can be `"layer_norm"`, `"ada_norm"`, `"ada_norm_zero"` or
            `"ada_norm_single"`.
        final_dropout (`bool`, *optional*, defaults to `False`):
            Whether to apply a final dropout after the last feed-forward layer.
        attention_type (`str`, *optional*, defaults to `"default"`):
            The type of attention to use. Can be `"default"`, `"gated"` or `"gated-text-image"`.
        positional_embeddings (`str`, *optional*, defaults to `None`):
            The type of positional embeddings to apply.
        num_positional_embeddings (`int`, *optional*, defaults to `None`):
            The maximum number of positional embeddings to apply.
    """

    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        double_self_attention: bool = False,
        upcast_attention: bool = False,
        norm_elementwise_affine: bool = True,
        norm_type: str = "layer_norm",
        norm_eps: float = 1e-5,
        final_dropout: bool = False,
        attention_type: str = "default",
        positional_embeddings: Optional[str] = None,
        num_positional_embeddings: Optional[int] = None,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention

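        # Resolve which normalization variant this block uses; at most one of these flags is True.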
        self.use_ada_layer_norm_zero = (
            num_embeds_ada_norm is not None
        ) and norm_type == "ada_norm_zero"
        self.use_ada_layer_norm = (
            num_embeds_ada_norm is not None
        ) and norm_type == "ada_norm"
        self.use_ada_layer_norm_single = norm_type == "ada_norm_single"
        self.use_layer_norm = norm_type == "layer_norm"

        if norm_type in ("ada_norm", "ada_norm_zero") and num_embeds_ada_norm is None:
            raise ValueError(
                f"`norm_type` is set to {norm_type}, but `num_embeds_ada_norm` is not defined. Please make sure to"
                f" define `num_embeds_ada_norm` if setting `norm_type` to {norm_type}."
            )

        if positional_embeddings and (num_positional_embeddings is None):
            raise ValueError(
                "If `positional_embeddings` is defined, `num_positional_embeddings` must also be defined."
            )

        if positional_embeddings == "sinusoidal":
            self.pos_embed = SinusoidalPositionalEmbedding(
                dim, max_seq_length=num_positional_embeddings
            )
        else:
            self.pos_embed = None

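        # Define the three sub-blocks. 1. Self-attention.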
        if self.use_ada_layer_norm:
            self.norm1 = AdaLayerNorm(dim, num_embeds_ada_norm)
        elif self.use_ada_layer_norm_zero:
            self.norm1 = AdaLayerNormZero(dim, num_embeds_ada_norm)
        else:
            self.norm1 = nn.LayerNorm(
                dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps
            )

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            cross_attention_dim=cross_attention_dim if only_cross_attention else None,
            upcast_attention=upcast_attention,
        )

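        # 2. Cross-attention (or a second self-attention when `double_self_attention` is set).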
        if cross_attention_dim is not None or double_self_attention:
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(
                    dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps
                )
            )
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim
                if not double_self_attention
                else None,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
        else:
            self.norm2 = None
            self.attn2 = None

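        # 3. Feed-forward.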
        if not self.use_ada_layer_norm_single:
            self.norm3 = nn.LayerNorm(
                dim, elementwise_affine=norm_elementwise_affine, eps=norm_eps
            )

        self.ff = FeedForward(
            dim,
            dropout=dropout,
            activation_fn=activation_fn,
            final_dropout=final_dropout,
        )

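        # 4. Fuser: gated self-attention used for GLIGEN-style grounded generation.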
        if attention_type == "gated" or attention_type == "gated-text-image":
            self.fuser = GatedSelfAttentionDense(
                dim, cross_attention_dim, num_attention_heads, attention_head_dim
            )

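        # 5. Scale-shift table for the "ada_norm_single" (PixArt-Alpha style) conditioning path.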
        if self.use_ada_layer_norm_single:
            self.scale_shift_table = nn.Parameter(torch.randn(6, dim) / dim**0.5)

        self._chunk_size = None
        self._chunk_dim = 0

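    # Stores the chunking configuration for the feed-forward layer. Note that forward()
    # below calls `self.ff` directly and does not itself consult `_chunk_size`.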
    def set_chunk_feed_forward(self, chunk_size: Optional[int], dim: int = 0):
        self._chunk_size = chunk_size
        self._chunk_dim = dim

    def forward(
        self,
        hidden_states: torch.FloatTensor,
        attention_mask: Optional[torch.FloatTensor] = None,
        encoder_hidden_states: Optional[torch.FloatTensor] = None,
        encoder_attention_mask: Optional[torch.FloatTensor] = None,
        timestep: Optional[torch.LongTensor] = None,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        class_labels: Optional[torch.LongTensor] = None,
    ) -> torch.FloatTensor:
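        # Normalization is always applied before the actual computation in the blocks below.
        # 0. Self-attention.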
        batch_size = hidden_states.shape[0]

        if self.use_ada_layer_norm:
            norm_hidden_states = self.norm1(hidden_states, timestep)
        elif self.use_ada_layer_norm_zero:
            norm_hidden_states, gate_msa, shift_mlp, scale_mlp, gate_mlp = self.norm1(
                hidden_states, timestep, class_labels, hidden_dtype=hidden_states.dtype
            )
        elif self.use_layer_norm:
            norm_hidden_states = self.norm1(hidden_states)
        elif self.use_ada_layer_norm_single:
            shift_msa, scale_msa, gate_msa, shift_mlp, scale_mlp, gate_mlp = (
                self.scale_shift_table[None] + timestep.reshape(batch_size, 6, -1)
            ).chunk(6, dim=1)
            norm_hidden_states = self.norm1(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_msa) + shift_msa
            norm_hidden_states = norm_hidden_states.squeeze(1)
        else:
            raise ValueError("Incorrect norm used")

        if self.pos_embed is not None:
            norm_hidden_states = self.pos_embed(norm_hidden_states)

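        # 1. Retrieve the LoRA scale and pop GLIGEN inputs out of `cross_attention_kwargs`.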
        lora_scale = (
            cross_attention_kwargs.get("scale", 1.0)
            if cross_attention_kwargs is not None
            else 1.0
        )
        cross_attention_kwargs = (
            cross_attention_kwargs.copy() if cross_attention_kwargs is not None else {}
        )
        gligen_kwargs = cross_attention_kwargs.pop("gligen", None)

        attn_output = self.attn1(
            norm_hidden_states,
            encoder_hidden_states=encoder_hidden_states
            if self.only_cross_attention
            else None,
            attention_mask=attention_mask,
            **cross_attention_kwargs,
        )
        if self.use_ada_layer_norm_zero:
            attn_output = gate_msa.unsqueeze(1) * attn_output
        elif self.use_ada_layer_norm_single:
            attn_output = gate_msa * attn_output

        hidden_states = attn_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

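        # 2. GLIGEN control: fuse grounding tokens into the hidden states.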
        if gligen_kwargs is not None:
            hidden_states = self.fuser(hidden_states, gligen_kwargs["objs"])

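        # 3. Cross-attention (skipped when the block was built without `attn2`).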
        if self.attn2 is not None:
            if self.use_ada_layer_norm:
                norm_hidden_states = self.norm2(hidden_states, timestep)
            elif self.use_ada_layer_norm_zero or self.use_layer_norm:
                norm_hidden_states = self.norm2(hidden_states)
            elif self.use_ada_layer_norm_single:
                norm_hidden_states = hidden_states
            else:
                raise ValueError("Incorrect norm")

            if self.pos_embed is not None and self.use_ada_layer_norm_single is False:
                norm_hidden_states = self.pos_embed(norm_hidden_states)

            attn_output = self.attn2(
                norm_hidden_states,
                encoder_hidden_states=encoder_hidden_states,
                attention_mask=encoder_attention_mask,
                **cross_attention_kwargs,
            )
            hidden_states = attn_output + hidden_states

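        # 4. Feed-forward.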
        if not self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm3(hidden_states)

        if self.use_ada_layer_norm_zero:
            norm_hidden_states = (
                norm_hidden_states * (1 + scale_mlp[:, None]) + shift_mlp[:, None]
            )

        if self.use_ada_layer_norm_single:
            norm_hidden_states = self.norm2(hidden_states)
            norm_hidden_states = norm_hidden_states * (1 + scale_mlp) + shift_mlp

        ff_output = self.ff(norm_hidden_states, scale=lora_scale)

        if self.use_ada_layer_norm_zero:
            ff_output = gate_mlp.unsqueeze(1) * ff_output
        elif self.use_ada_layer_norm_single:
            ff_output = gate_mlp * ff_output

        hidden_states = ff_output + hidden_states
        if hidden_states.ndim == 4:
            hidden_states = hidden_states.squeeze(1)

        return hidden_states

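
# TemporalBasicTransformerBlock extends the basic block for video UNets: it runs two
# self-attention passes (`attn1` and `attn1_5`), optional cross-attention, and an optional
# temporal attention applied across frames.
#
# Illustrative usage sketch (not executed; the dimensions are assumptions chosen for the
# example, not values taken from this file):
#
#     block = TemporalBasicTransformerBlock(
#         dim=320,
#         num_attention_heads=8,
#         attention_head_dim=40,
#         cross_attention_dim=768,
#         unet_use_cross_frame_attention=False,
#         unet_use_temporal_attention=False,
#     )
#     x = torch.randn(2 * 16, 4096, 320)   # (batch * frames, tokens, dim)
#     ctx = torch.randn(2 * 16, 77, 768)   # (batch * frames, text_tokens, cross_attention_dim)
#     out = block(x, encoder_hidden_states=ctx, video_length=16)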
class TemporalBasicTransformerBlock(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        unet_use_cross_frame_attention=None,
        unet_use_temporal_attention=None,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm = num_embeds_ada_norm is not None
        self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
        self.unet_use_temporal_attention = unet_use_temporal_attention

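        # First self-attention pass. When `unet_use_cross_frame_attention` is set, forward()
        # passes `video_length` to this layer, which assumes a cross-frame attention
        # implementation that accepts it.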
        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
        )
        self.norm1 = (
            AdaLayerNorm(dim, num_embeds_ada_norm)
            if self.use_ada_layer_norm
            else nn.LayerNorm(dim)
        )

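        # Second self-attention pass ("attn1_5") with its own normalization.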
        self.attn1_5 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
        )
        self.norm1_5 = (
            AdaLayerNorm(dim, num_embeds_ada_norm)
            if self.use_ada_layer_norm
            else nn.LayerNorm(dim)
        )

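        # Cross-attention, only when `cross_attention_dim` is provided.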
        if cross_attention_dim is not None:
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim)
            )
        else:
            self.attn2 = None
            self.norm2 = None

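        # Feed-forward.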
        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)
        self.norm3 = nn.LayerNorm(dim)
        self.use_ada_layer_norm_zero = False

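        # Optional temporal attention applied across frames; its output projection weight is
        # zero-initialized so the temporal branch contributes little at initialization.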
        assert unet_use_temporal_attention is not None
        if unet_use_temporal_attention:
            self.attn_temp = Attention(
                query_dim=dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            nn.init.zeros_(self.attn_temp.to_out[0].weight.data)
            self.norm_temp = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim)
            )

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        attention_mask=None,
        video_length=None,
    ):
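        # Self-attention (cross-frame attention additionally receives `video_length`).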
        norm_hidden_states = (
            self.norm1(hidden_states, timestep)
            if self.use_ada_layer_norm
            else self.norm1(hidden_states)
        )
        if self.unet_use_cross_frame_attention:
            hidden_states = (
                self.attn1(
                    norm_hidden_states,
                    attention_mask=attention_mask,
                    video_length=video_length,
                )
                + hidden_states
            )
        else:
            hidden_states = (
                self.attn1(norm_hidden_states, attention_mask=attention_mask)
                + hidden_states
            )

        norm_hidden_states = (
            self.norm1_5(hidden_states, timestep)
            if self.use_ada_layer_norm
            else self.norm1_5(hidden_states)
        )
        if self.unet_use_cross_frame_attention:
            hidden_states = (
                self.attn1_5(
                    norm_hidden_states,
                    attention_mask=attention_mask,
                    video_length=video_length,
                )
                + hidden_states
            )
        else:
            hidden_states = (
                self.attn1_5(norm_hidden_states, attention_mask=attention_mask)
                + hidden_states
            )

        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep)
                if self.use_ada_layer_norm
                else self.norm2(hidden_states)
            )
            hidden_states = (
                self.attn2(
                    norm_hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                )
                + hidden_states
            )

        hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states

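        # Temporal attention: reshape (batch * frames, tokens, channels) so that attention
        # runs along the frame axis, then restore the original layout.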
        if self.unet_use_temporal_attention:
            d = hidden_states.shape[1]
            hidden_states = rearrange(
                hidden_states, "(b f) d c -> (b d) f c", f=video_length
            )
            norm_hidden_states = (
                self.norm_temp(hidden_states, timestep)
                if self.use_ada_layer_norm
                else self.norm_temp(hidden_states)
            )
            hidden_states = self.attn_temp(norm_hidden_states) + hidden_states
            hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d)

        return hidden_states

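
# Variant of TemporalBasicTransformerBlock without the second self-attention pass
# (`attn1_5`); the remaining layers and the forward pass are otherwise the same.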
class TemporalBasicTransformerBlockOld(nn.Module):
    def __init__(
        self,
        dim: int,
        num_attention_heads: int,
        attention_head_dim: int,
        dropout=0.0,
        cross_attention_dim: Optional[int] = None,
        activation_fn: str = "geglu",
        num_embeds_ada_norm: Optional[int] = None,
        attention_bias: bool = False,
        only_cross_attention: bool = False,
        upcast_attention: bool = False,
        unet_use_cross_frame_attention=None,
        unet_use_temporal_attention=None,
    ):
        super().__init__()
        self.only_cross_attention = only_cross_attention
        self.use_ada_layer_norm = num_embeds_ada_norm is not None
        self.unet_use_cross_frame_attention = unet_use_cross_frame_attention
        self.unet_use_temporal_attention = unet_use_temporal_attention

        self.attn1 = Attention(
            query_dim=dim,
            heads=num_attention_heads,
            dim_head=attention_head_dim,
            dropout=dropout,
            bias=attention_bias,
            upcast_attention=upcast_attention,
        )
        self.norm1 = (
            AdaLayerNorm(dim, num_embeds_ada_norm)
            if self.use_ada_layer_norm
            else nn.LayerNorm(dim)
        )

        if cross_attention_dim is not None:
            self.attn2 = Attention(
                query_dim=dim,
                cross_attention_dim=cross_attention_dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            self.norm2 = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim)
            )
        else:
            self.attn2 = None
            self.norm2 = None

        self.ff = FeedForward(dim, dropout=dropout, activation_fn=activation_fn)
        self.norm3 = nn.LayerNorm(dim)
        self.use_ada_layer_norm_zero = False

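        # Optional temporal attention, constructed the same way as in
        # TemporalBasicTransformerBlock (zero-initialized output projection weight).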
        assert unet_use_temporal_attention is not None
        if unet_use_temporal_attention:
            self.attn_temp = Attention(
                query_dim=dim,
                heads=num_attention_heads,
                dim_head=attention_head_dim,
                dropout=dropout,
                bias=attention_bias,
                upcast_attention=upcast_attention,
            )
            nn.init.zeros_(self.attn_temp.to_out[0].weight.data)
            self.norm_temp = (
                AdaLayerNorm(dim, num_embeds_ada_norm)
                if self.use_ada_layer_norm
                else nn.LayerNorm(dim)
            )

    def forward(
        self,
        hidden_states,
        encoder_hidden_states=None,
        timestep=None,
        attention_mask=None,
        video_length=None,
    ):
        norm_hidden_states = (
            self.norm1(hidden_states, timestep)
            if self.use_ada_layer_norm
            else self.norm1(hidden_states)
        )
        if self.unet_use_cross_frame_attention:
            hidden_states = (
                self.attn1(
                    norm_hidden_states,
                    attention_mask=attention_mask,
                    video_length=video_length,
                )
                + hidden_states
            )
        else:
            hidden_states = (
                self.attn1(norm_hidden_states, attention_mask=attention_mask)
                + hidden_states
            )

        if self.attn2 is not None:
            norm_hidden_states = (
                self.norm2(hidden_states, timestep)
                if self.use_ada_layer_norm
                else self.norm2(hidden_states)
            )
            hidden_states = (
                self.attn2(
                    norm_hidden_states,
                    encoder_hidden_states=encoder_hidden_states,
                    attention_mask=attention_mask,
                )
                + hidden_states
            )

        hidden_states = self.ff(self.norm3(hidden_states)) + hidden_states

        if self.unet_use_temporal_attention:
            d = hidden_states.shape[1]
            hidden_states = rearrange(
                hidden_states, "(b f) d c -> (b d) f c", f=video_length
            )
            norm_hidden_states = (
                self.norm_temp(hidden_states, timestep)
                if self.use_ada_layer_norm
                else self.norm_temp(hidden_states)
            )
            hidden_states = self.attn_temp(norm_hidden_states) + hidden_states
            hidden_states = rearrange(hidden_states, "(b d) f c -> (b f) d c", d=d)

        return hidden_states