ACCC1380 committed on
Commit 50f885c
1 Parent(s): 499d757

Upload lora-scripts/sd-scripts/XTI_hijack.py with huggingface_hub

Files changed (1)
  1. lora-scripts/sd-scripts/XTI_hijack.py +204 -0
lora-scripts/sd-scripts/XTI_hijack.py ADDED
import torch
from library.device_utils import init_ipex
init_ipex()

from typing import Union, List, Optional, Dict, Any, Tuple
from diffusers.models.unet_2d_condition import UNet2DConditionOutput

from library.original_unet import SampleOutput
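# Note (an assumption based on the file name and the sd-scripts layout): these
# functions are meant to replace the forward methods of the UNet and its
# cross-attention blocks in library.original_unet, so that each cross-attention
# layer receives its own text embedding (per-layer embeddings in the style of
# XTI / "P+: Extended Textual Inversion") instead of one shared tensor.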


def unet_forward_XTI(
    self,
    sample: torch.FloatTensor,
    timestep: Union[torch.Tensor, float, int],
    encoder_hidden_states: torch.Tensor,
    class_labels: Optional[torch.Tensor] = None,
    return_dict: bool = True,
) -> Union[Dict, Tuple]:
    r"""
    Args:
        sample (`torch.FloatTensor`): (batch, channel, height, width) noisy inputs tensor
        timestep (`torch.FloatTensor` or `float` or `int`): (batch) timesteps
        encoder_hidden_states (`torch.FloatTensor`): (batch, sequence_length, feature_dim) encoder hidden states
        return_dict (`bool`, *optional*, defaults to `True`):
            Whether or not to return a dict instead of a plain tuple.

    Returns:
        `SampleOutput` or `tuple`:
            `SampleOutput` if `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is the sample tensor.
    """
    # By default samples have to be at least a multiple of the overall upsampling factor.
    # The overall upsampling factor is equal to 2 ** (# num of upsampling layers).
    # However, the upsampling interpolation output size can be forced to fit any upsampling size
    # on the fly if necessary.
    # By default, the sample must be a multiple of "2^(number of upsamplers)", i.e. 64
    # To support other sizes as well, the upsample size is changed when necessary
    # Quality probably degrades, so it is best to keep the size divisible by 64
    default_overall_up_factor = 2**self.num_upsamplers

    # upsample size should be forwarded when sample is not a multiple of `default_overall_up_factor`
    # when the size is not divisible by 64, tell the upsamplers the size
    forward_upsample_size = False
    upsample_size = None

    if any(s % default_overall_up_factor != 0 for s in sample.shape[-2:]):
        # logger.info("Forward upsample size to force interpolation output size.")
        forward_upsample_size = True
    # 1. time
    timesteps = timestep
    timesteps = self.handle_unusual_timesteps(sample, timesteps)  # process only unusual cases

    t_emb = self.time_proj(timesteps)

    # timesteps does not contain any weights and will always return f32 tensors
    # but time_embedding might actually be running in fp16. so we need to cast here.
    # there might be better ways to encapsulate this.
    # timesteps contains no weights, so it always returns float32 tensors
    # but time_embedding may be running in fp16, so it has to be cast here
    # wouldn't it be enough to cast inside time_proj?
    t_emb = t_emb.to(dtype=self.dtype)
    emb = self.time_embedding(t_emb)

    # 2. pre-process
    sample = self.conv_in(sample)

    # 3. down
    down_block_res_samples = (sample,)
    down_i = 0
    for downsample_block in self.down_blocks:
        # the down blocks could be made to always take encoder_hidden_states in forward,
        # but this way is perhaps easier to follow
        if downsample_block.has_cross_attention:
            sample, res_samples = downsample_block(
                hidden_states=sample,
                temb=emb,
                encoder_hidden_states=encoder_hidden_states[down_i : down_i + 2],
            )
            down_i += 2
        else:
            sample, res_samples = downsample_block(hidden_states=sample, temb=emb)

        down_block_res_samples += res_samples

    # 4. mid
    sample = self.mid_block(sample, emb, encoder_hidden_states=encoder_hidden_states[6])

    # 5. up
    up_i = 7
    for i, upsample_block in enumerate(self.up_blocks):
        is_final_block = i == len(self.up_blocks) - 1

        res_samples = down_block_res_samples[-len(upsample_block.resnets) :]
        down_block_res_samples = down_block_res_samples[: -len(upsample_block.resnets)]  # skip connection

        # if we have not reached the final block and need to forward the upsample size, we do it here
        # as noted above, pass upsample_size to every block except the final one
        if not is_final_block and forward_upsample_size:
            upsample_size = down_block_res_samples[-1].shape[2:]

        if upsample_block.has_cross_attention:
            sample = upsample_block(
                hidden_states=sample,
                temb=emb,
                res_hidden_states_tuple=res_samples,
                encoder_hidden_states=encoder_hidden_states[up_i : up_i + 3],
                upsample_size=upsample_size,
            )
            up_i += 3
        else:
            sample = upsample_block(
                hidden_states=sample, temb=emb, res_hidden_states_tuple=res_samples, upsample_size=upsample_size
            )

    # 6. post-process
    sample = self.conv_norm_out(sample)
    sample = self.conv_act(sample)
    sample = self.conv_out(sample)

    if not return_dict:
        return (sample,)

    return SampleOutput(sample=sample)
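# Per-layer embedding layout consumed above: down_i steps 0 -> 2 -> 4, so indices
# 0-5 feed the three cross-attention down blocks (two attention layers each);
# index 6 feeds the mid block; up_i steps 7 -> 10 -> 13, so indices 7-15 feed the
# three cross-attention up blocks (three attention layers each). That makes 16
# embeddings in total for the SD1.x UNet.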


def downblock_forward_XTI(
    self, hidden_states, temb=None, encoder_hidden_states=None, attention_mask=None, cross_attention_kwargs=None
):
    output_states = ()
    i = 0

    for resnet, attn in zip(self.resnets, self.attentions):
        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
            hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states[i]
            )[0]
        else:
            hidden_states = resnet(hidden_states, temb)
            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states[i]).sample

        output_states += (hidden_states,)
        i += 1

    if self.downsamplers is not None:
        for downsampler in self.downsamplers:
            hidden_states = downsampler(hidden_states)

        output_states += (hidden_states,)

    return hidden_states, output_states


def upblock_forward_XTI(
    self,
    hidden_states,
    res_hidden_states_tuple,
    temb=None,
    encoder_hidden_states=None,
    upsample_size=None,
):
    i = 0
    for resnet, attn in zip(self.resnets, self.attentions):
        # pop res hidden states
        res_hidden_states = res_hidden_states_tuple[-1]
        res_hidden_states_tuple = res_hidden_states_tuple[:-1]
        hidden_states = torch.cat([hidden_states, res_hidden_states], dim=1)

        if self.training and self.gradient_checkpointing:

            def create_custom_forward(module, return_dict=None):
                def custom_forward(*inputs):
                    if return_dict is not None:
                        return module(*inputs, return_dict=return_dict)
                    else:
                        return module(*inputs)

                return custom_forward

            hidden_states = torch.utils.checkpoint.checkpoint(create_custom_forward(resnet), hidden_states, temb)
            hidden_states = torch.utils.checkpoint.checkpoint(
                create_custom_forward(attn, return_dict=False), hidden_states, encoder_hidden_states[i]
            )[0]
        else:
            hidden_states = resnet(hidden_states, temb)
            hidden_states = attn(hidden_states, encoder_hidden_states=encoder_hidden_states[i]).sample

        i += 1

    if self.upsamplers is not None:
        for upsampler in self.upsamplers:
            hidden_states = upsampler(hidden_states, upsample_size)

    return hidden_states
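
Nothing in this file calls these functions itself; the XTI training script is expected to bind them onto the UNet classes at runtime. Below is a minimal hook-up sketch, assuming library.original_unet exposes UNet2DConditionModel, CrossAttnDownBlock2D, and CrossAttnUpBlock2D under those names; the actual wiring lives in the training script, not in this file.

from library import original_unet
from XTI_hijack import unet_forward_XTI, downblock_forward_XTI, upblock_forward_XTI

# Monkey-patch the forwards (class names are assumptions; verify against the
# XTI training script before relying on this).
original_unet.UNet2DConditionModel.forward = unet_forward_XTI
original_unet.CrossAttnDownBlock2D.forward = downblock_forward_XTI
original_unet.CrossAttnUpBlock2D.forward = upblock_forward_XTI

# After patching, the encoder_hidden_states passed to the UNet must be indexable
# per layer (16 entries for SD1.x), e.g. a list or stacked tensor of
# (batch, seq_len, dim) embeddings, one per cross-attention layer.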