ACCC1380 committed
Commit: d565e9b (1 parent: 806602c)

Upload lora-scripts/sd-scripts/library/ipex/diffusers.py with huggingface_hub

lora-scripts/sd-scripts/library/ipex/diffusers.py ADDED
@@ -0,0 +1,312 @@
import os
import torch
import intel_extension_for_pytorch as ipex # pylint: disable=import-error, unused-import
import diffusers #0.24.0 # pylint: disable=import-error
from diffusers.models.attention_processor import Attention
from diffusers.utils import USE_PEFT_BACKEND
from functools import cache

# pylint: disable=protected-access, missing-function-docstring, line-too-long

attention_slice_rate = float(os.environ.get('IPEX_ATTENTION_SLICE_RATE', 4))

@cache
def find_slice_size(slice_size, slice_block_size):
    while (slice_size * slice_block_size) > attention_slice_rate:
        slice_size = slice_size // 2
        if slice_size <= 1:
            slice_size = 1
            break
    return slice_size

@cache
def find_attention_slice_sizes(query_shape, query_element_size, query_device_type, slice_size=None):
    if len(query_shape) == 3:
        batch_size_attention, query_tokens, shape_three = query_shape
        shape_four = 1
    else:
        batch_size_attention, query_tokens, shape_three, shape_four = query_shape
    if slice_size is not None:
        batch_size_attention = slice_size

    slice_block_size = query_tokens * shape_three * shape_four / 1024 / 1024 * query_element_size
    block_size = batch_size_attention * slice_block_size

    split_slice_size = batch_size_attention
    split_2_slice_size = query_tokens
    split_3_slice_size = shape_three

    do_split = False
    do_split_2 = False
    do_split_3 = False

    if query_device_type != "xpu":
        return do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size

    if block_size > attention_slice_rate:
        do_split = True
        split_slice_size = find_slice_size(split_slice_size, slice_block_size)
        if split_slice_size * slice_block_size > attention_slice_rate:
            slice_2_block_size = split_slice_size * shape_three * shape_four / 1024 / 1024 * query_element_size
            do_split_2 = True
            split_2_slice_size = find_slice_size(split_2_slice_size, slice_2_block_size)
            if split_2_slice_size * slice_2_block_size > attention_slice_rate:
                slice_3_block_size = split_slice_size * split_2_slice_size * shape_four / 1024 / 1024 * query_element_size
                do_split_3 = True
                split_3_slice_size = find_slice_size(split_3_slice_size, slice_3_block_size)

    return do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size

class SlicedAttnProcessor: # pylint: disable=too-few-public-methods
    r"""
    Processor for implementing sliced attention.

    Args:
        slice_size (`int`, *optional*):
            The number of steps to compute attention. Uses as many slices as `attention_head_dim // slice_size`, and
            `attention_head_dim` must be a multiple of the `slice_size`.
    """

    def __init__(self, slice_size):
        self.slice_size = slice_size

    def __call__(self, attn: Attention, hidden_states: torch.FloatTensor,
                 encoder_hidden_states=None, attention_mask=None) -> torch.FloatTensor: # pylint: disable=too-many-statements, too-many-locals, too-many-branches

        residual = hidden_states

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states)
        dim = query.shape[-1]
        query = attn.head_to_batch_dim(query)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        batch_size_attention, query_tokens, shape_three = query.shape
        hidden_states = torch.zeros(
            (batch_size_attention, query_tokens, dim // attn.heads), device=query.device, dtype=query.dtype
        )

        ####################################################################
        # ARC GPUs can't allocate more than 4GB to a single block, Slice it:
        _, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size = find_attention_slice_sizes(query.shape, query.element_size(), query.device.type, slice_size=self.slice_size)

        for i in range(batch_size_attention // split_slice_size):
            start_idx = i * split_slice_size
            end_idx = (i + 1) * split_slice_size
            if do_split_2:
                for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name
                    start_idx_2 = i2 * split_2_slice_size
                    end_idx_2 = (i2 + 1) * split_2_slice_size
                    if do_split_3:
                        for i3 in range(shape_three // split_3_slice_size): # pylint: disable=invalid-name
                            start_idx_3 = i3 * split_3_slice_size
                            end_idx_3 = (i3 + 1) * split_3_slice_size

                            query_slice = query[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3]
                            key_slice = key[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3]
                            attn_mask_slice = attention_mask[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] if attention_mask is not None else None

                            attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                            del query_slice
                            del key_slice
                            del attn_mask_slice
                            attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3])

                            hidden_states[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] = attn_slice
                            del attn_slice
                    else:
                        query_slice = query[start_idx:end_idx, start_idx_2:end_idx_2]
                        key_slice = key[start_idx:end_idx, start_idx_2:end_idx_2]
                        attn_mask_slice = attention_mask[start_idx:end_idx, start_idx_2:end_idx_2] if attention_mask is not None else None

                        attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                        del query_slice
                        del key_slice
                        del attn_mask_slice
                        attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx, start_idx_2:end_idx_2])

                        hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = attn_slice
                        del attn_slice
                torch.xpu.synchronize(query.device)
            else:
                query_slice = query[start_idx:end_idx]
                key_slice = key[start_idx:end_idx]
                attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None

                attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                del query_slice
                del key_slice
                del attn_mask_slice
                attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])

                hidden_states[start_idx:end_idx] = attn_slice
                del attn_slice
        ####################################################################

        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states


class AttnProcessor:
    r"""
    Default processor for performing attention-related computations.
    """

    def __call__(self, attn: Attention, hidden_states: torch.FloatTensor,
                 encoder_hidden_states=None, attention_mask=None,
                 temb=None, scale: float = 1.0) -> torch.Tensor: # pylint: disable=too-many-statements, too-many-locals, too-many-branches

        residual = hidden_states

        args = () if USE_PEFT_BACKEND else (scale,)

        if attn.spatial_norm is not None:
            hidden_states = attn.spatial_norm(hidden_states, temb)

        input_ndim = hidden_states.ndim

        if input_ndim == 4:
            batch_size, channel, height, width = hidden_states.shape
            hidden_states = hidden_states.view(batch_size, channel, height * width).transpose(1, 2)

        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)

        if attn.group_norm is not None:
            hidden_states = attn.group_norm(hidden_states.transpose(1, 2)).transpose(1, 2)

        query = attn.to_q(hidden_states, *args)

        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states, *args)
        value = attn.to_v(encoder_hidden_states, *args)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        ####################################################################
        # ARC GPUs can't allocate more than 4GB to a single block, Slice it:
        batch_size_attention, query_tokens, shape_three = query.shape[0], query.shape[1], query.shape[2]
        hidden_states = torch.zeros(query.shape, device=query.device, dtype=query.dtype)
        do_split, do_split_2, do_split_3, split_slice_size, split_2_slice_size, split_3_slice_size = find_attention_slice_sizes(query.shape, query.element_size(), query.device.type)

        if do_split:
            for i in range(batch_size_attention // split_slice_size):
                start_idx = i * split_slice_size
                end_idx = (i + 1) * split_slice_size
                if do_split_2:
                    for i2 in range(query_tokens // split_2_slice_size): # pylint: disable=invalid-name
                        start_idx_2 = i2 * split_2_slice_size
                        end_idx_2 = (i2 + 1) * split_2_slice_size
                        if do_split_3:
                            for i3 in range(shape_three // split_3_slice_size): # pylint: disable=invalid-name
                                start_idx_3 = i3 * split_3_slice_size
                                end_idx_3 = (i3 + 1) * split_3_slice_size

                                query_slice = query[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3]
                                key_slice = key[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3]
                                attn_mask_slice = attention_mask[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] if attention_mask is not None else None

                                attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                                del query_slice
                                del key_slice
                                del attn_mask_slice
                                attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3])

                                hidden_states[start_idx:end_idx, start_idx_2:end_idx_2, start_idx_3:end_idx_3] = attn_slice
                                del attn_slice
                        else:
                            query_slice = query[start_idx:end_idx, start_idx_2:end_idx_2]
                            key_slice = key[start_idx:end_idx, start_idx_2:end_idx_2]
                            attn_mask_slice = attention_mask[start_idx:end_idx, start_idx_2:end_idx_2] if attention_mask is not None else None

                            attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                            del query_slice
                            del key_slice
                            del attn_mask_slice
                            attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx, start_idx_2:end_idx_2])

                            hidden_states[start_idx:end_idx, start_idx_2:end_idx_2] = attn_slice
                            del attn_slice
                else:
                    query_slice = query[start_idx:end_idx]
                    key_slice = key[start_idx:end_idx]
                    attn_mask_slice = attention_mask[start_idx:end_idx] if attention_mask is not None else None

                    attn_slice = attn.get_attention_scores(query_slice, key_slice, attn_mask_slice)
                    del query_slice
                    del key_slice
                    del attn_mask_slice
                    attn_slice = torch.bmm(attn_slice, value[start_idx:end_idx])

                    hidden_states[start_idx:end_idx] = attn_slice
                    del attn_slice
            torch.xpu.synchronize(query.device)
        else:
            attention_probs = attn.get_attention_scores(query, key, attention_mask)
            hidden_states = torch.bmm(attention_probs, value)
        ####################################################################
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states, *args)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if input_ndim == 4:
            hidden_states = hidden_states.transpose(-1, -2).reshape(batch_size, channel, height, width)

        if attn.residual_connection:
            hidden_states = hidden_states + residual

        hidden_states = hidden_states / attn.rescale_output_factor

        return hidden_states

def ipex_diffusers():
    #ARC GPUs can't allocate more than 4GB to a single block:
    diffusers.models.attention_processor.SlicedAttnProcessor = SlicedAttnProcessor
    diffusers.models.attention_processor.AttnProcessor = AttnProcessor
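
The uploaded module only defines the patched processors; nothing changes until ipex_diffusers() is called. Below is a minimal, hypothetical usage sketch that is not part of the uploaded file: the import path library.ipex.diffusers is assumed from the upload location, and the model id is only an example. It relies on diffusers' Attention.set_attention_slice() resolving SlicedAttnProcessor by name from the patched module, which is why the patch is applied before the pipeline is built.

import os

# The hijack module reads IPEX_ATTENTION_SLICE_RATE once at import time (default: 4),
# so set it before importing if a different slicing threshold is wanted.
os.environ.setdefault("IPEX_ATTENTION_SLICE_RATE", "4")

from library.ipex.diffusers import ipex_diffusers  # import path is an assumption

# Rebind diffusers.models.attention_processor.{AttnProcessor, SlicedAttnProcessor}
# to the XPU-sliced versions defined in the uploaded file.
ipex_diffusers()

import diffusers

pipe = diffusers.StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5")  # example model id
pipe = pipe.to("xpu")

# enable_attention_slicing() ends up calling Attention.set_attention_slice(), which
# instantiates SlicedAttnProcessor by name -- now the Arc-friendly class from this file.
pipe.enable_attention_slicing()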