Gregorio1502 committed on
Commit
1790e97
1 Parent(s): 4038410

Create noise.py

Files changed (1)
  1. noise.py +266 -0
noise.py ADDED
@@ -0,0 +1,266 @@
+ import torch
+
+ import os
+ import sys
+
+ sys.path.insert(0, os.path.join(os.path.dirname(os.path.realpath(__file__)), "CFUI"))
+
+ import CFUI.model_management
+ import CFUI.sample
+ import CFUI.sampler_helpers
+ import CFUI.samplers
+ import CFUI.utils
+
+ MAX_RESOLUTION = 8192
+
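+ # Resize a mask to the latent's spatial size, broadcast it across the channel
+ # dimension, and tile it along the batch dimension until it covers the batch.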
+ def prepare_mask(mask, shape):
+     mask = torch.nn.functional.interpolate(mask.reshape((-1, 1, mask.shape[-2], mask.shape[-1])), size=(shape[2], shape[3]), mode="bilinear")
+     mask = mask.expand((-1, shape[1], -1, -1))
+     if mask.shape[0] < shape[0]:
+         mask = mask.repeat((shape[0] - 1) // mask.shape[0] + 1, 1, 1, 1)[:shape[0]]
+     return mask
+
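+ # Node: create a batch of seeded Gaussian noise latents (4 channels at 1/8 of
+ # the requested pixel resolution), generated on CPU or GPU and returned on CPU.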
+ class NoisyLatentImage:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": {
+             "source": (["CPU", "GPU"], ),
+             "seed": ("INT", {"default": 0, "min": 0, "max": 0xffffffffffffffff}),
+             "width": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+             "height": ("INT", {"default": 512, "min": 64, "max": MAX_RESOLUTION, "step": 8}),
+             "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+         }}
+     RETURN_TYPES = ("LATENT",)
+     FUNCTION = "create_noisy_latents"
+
+     CATEGORY = "latent/noise"
+
+     def create_noisy_latents(self, source, seed, width, height, batch_size):
+         torch.manual_seed(seed)
+         if source == "CPU":
+             device = "cpu"
+         else:
+             device = CFUI.model_management.get_torch_device()
+         noise = torch.randn((batch_size, 4, height // 8, width // 8), dtype=torch.float32, device=device).cpu()
+         return ({"samples": noise}, )
+
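+ # Node: pick a single latent out of a batch by index and repeat it batch_size
+ # times. (Currently commented out of the node mappings below.)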
+ class DuplicateBatchIndex:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": {
+             "latents": ("LATENT",),
+             "batch_index": ("INT", {"default": 0, "min": 0, "max": 63}),
+             "batch_size": ("INT", {"default": 1, "min": 1, "max": 64}),
+         }}
+
+     RETURN_TYPES = ("LATENT",)
+     FUNCTION = "duplicate_index"
+
+     CATEGORY = "latent"
+
+     def duplicate_index(self, latents, batch_index, batch_size):
+         s = latents.copy()
+         batch_index = min(s["samples"].shape[0] - 1, batch_index)
+         target = s["samples"][batch_index:batch_index + 1].clone()
+         target = target.repeat((batch_size, 1, 1, 1))
+         s["samples"] = target
+         return (s,)
+
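+ # Spherical linear interpolation between two tensors, applied per batch item:
+ #   slerp(t; a, b) = sin((1-t)*omega)/sin(omega) * a + sin(t*omega)/sin(omega) * b
+ # where omega is the angle between the flattened, normalized tensors. NaNs from
+ # zero-norm inputs are zeroed out before the angle is computed.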
+ # from https://discuss.pytorch.org/t/help-regarding-slerp-function-for-generative-model-sampling/32475
+ def slerp(val, low, high):
+     dims = low.shape
+
+     # flatten to batches
+     low = low.reshape(dims[0], -1)
+     high = high.reshape(dims[0], -1)
+
+     low_norm = low / torch.norm(low, dim=1, keepdim=True)
+     high_norm = high / torch.norm(high, dim=1, keepdim=True)
+
+     # in case we divide by zero
+     low_norm[low_norm != low_norm] = 0.0
+     high_norm[high_norm != high_norm] = 0.0
+
+     omega = torch.acos((low_norm * high_norm).sum(1))
+     so = torch.sin(omega)
+     res = (torch.sin((1.0 - val) * omega) / so).unsqueeze(1) * low + (torch.sin(val * omega) / so).unsqueeze(1) * high
+     return res.reshape(dims)
+
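+ # Node: slerp between two LATENT inputs by `factor`; with a mask, only the
+ # masked region is interpolated and the rest keeps latents1.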
+ class LatentSlerp:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {
+             "required": {
+                 "latents1": ("LATENT",),
+                 "factor": ("FLOAT", {"default": .5, "min": 0.0, "max": 1.0, "step": 0.01}),
+             },
+             "optional": {
+                 "latents2": ("LATENT",),
+                 "mask": ("MASK", ),
+             }}
+
+     RETURN_TYPES = ("LATENT",)
+     FUNCTION = "slerp_latents"
+
+     CATEGORY = "latent"
+
+     def slerp_latents(self, latents1, factor, latents2=None, mask=None):
+         s = latents1.copy()
+         if latents2 is None:
+             return (s,)
+         if latents1["samples"].shape != latents2["samples"].shape:
+             print("warning, shapes in LatentSlerp not the same, ignoring")
+             return (s,)
+         slerped = slerp(factor, latents1["samples"].clone(), latents2["samples"].clone())
+         if mask is not None:
+             mask = prepare_mask(mask, slerped.shape)
+             slerped = mask * slerped + (1 - mask) * latents1["samples"]
+         s["samples"] = slerped
+         return (s,)
+
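+ # Node: compute the drop in sigma (noise level) between two steps of the chosen
+ # sampler/scheduler, divided by the model's latent scale factor. The returned
+ # float can be used as the `strength` input of InjectNoise below.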
+ class GetSigma:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": {
+             "model": ("MODEL",),
+             "sampler_name": (CFUI.samplers.KSampler.SAMPLERS, ),
+             "scheduler": (CFUI.samplers.KSampler.SCHEDULERS, ),
+             "steps": ("INT", {"default": 10000, "min": 0, "max": 10000}),
+             "start_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+             "end_at_step": ("INT", {"default": 10000, "min": 1, "max": 10000}),
+         }}
+
+     RETURN_TYPES = ("FLOAT",)
+     FUNCTION = "calc_sigma"
+
+     CATEGORY = "latent/noise"
+
+     def calc_sigma(self, model, sampler_name, scheduler, steps, start_at_step, end_at_step):
+         device = CFUI.model_management.get_torch_device()
+         end_at_step = min(steps, end_at_step)
+         start_at_step = min(start_at_step, end_at_step)
+         CFUI.model_management.load_model_gpu(model)
+         sampler = CFUI.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+         sigmas = sampler.sigmas
+         sigma = sigmas[start_at_step] - sigmas[end_at_step]
+         sigma /= model.model.latent_format.scale_factor
+         return (sigma.cpu().numpy(),)
+
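+ # Node: add a separately supplied noise latent, scaled by `strength`, to the
+ # input latents, optionally restricted to a masked region.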
+ class InjectNoise:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required": {
+             "latents": ("LATENT",),
+
+             "strength": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 200.0, "step": 0.01}),
+             },
+             "optional": {
+                 "noise": ("LATENT",),
+                 "mask": ("MASK", ),
+             }}
+
+     RETURN_TYPES = ("LATENT",)
+     FUNCTION = "inject_noise"
+
+     CATEGORY = "latent/noise"
+
+     def inject_noise(self, latents, strength, noise=None, mask=None):
+         s = latents.copy()
+         if noise is None:
+             return (s,)
+         if latents["samples"].shape != noise["samples"].shape:
+             print("warning, shapes in InjectNoise not the same, ignoring")
+             return (s,)
+         noised = s["samples"].clone() + noise["samples"].clone() * strength
+         if mask is not None:
+             mask = prepare_mask(mask, noised.shape)
+             noised = mask * noised + (1 - mask) * latents["samples"]
+         s["samples"] = noised
+         return (s,)
+
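+ # Node: "unsample" a latent back toward noise by running the chosen sampler over
+ # the flipped (ascending) sigma schedule with the given conditioning and CFG.
+ # With normalize enabled, the result is shifted and scaled to zero mean and unit
+ # standard deviation.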
+ class Unsampler:
+     @classmethod
+     def INPUT_TYPES(s):
+         return {"required":
+                 {"model": ("MODEL",),
+                  "steps": ("INT", {"default": 20, "min": 1, "max": 10000}),
+                  "end_at_step": ("INT", {"default": 0, "min": 0, "max": 10000}),
+                  "cfg": ("FLOAT", {"default": 1.0, "min": 0.0, "max": 100.0}),
+                  "sampler_name": (CFUI.samplers.KSampler.SAMPLERS, ),
+                  "scheduler": (CFUI.samplers.KSampler.SCHEDULERS, ),
+                  "normalize": (["disable", "enable"], ),
+                  "positive": ("CONDITIONING", ),
+                  "negative": ("CONDITIONING", ),
+                  "latent_image": ("LATENT", ),
+                  }}
+
+     RETURN_TYPES = ("LATENT",)
+     FUNCTION = "unsampler"
+
+     CATEGORY = "sampling"
+
+     def unsampler(self, model, cfg, sampler_name, steps, end_at_step, scheduler, normalize, positive, negative, latent_image):
+         normalize = normalize == "enable"
+         device = CFUI.model_management.get_torch_device()
+         latent = latent_image
+         latent_image = latent["samples"]
+
+         end_at_step = min(end_at_step, steps - 1)
+         end_at_step = steps - end_at_step
+
+         noise = torch.zeros(latent_image.size(), dtype=latent_image.dtype, layout=latent_image.layout, device="cpu")
+         noise_mask = None
+         if "noise_mask" in latent:
+             noise_mask = CFUI.sampler_helpers.prepare_mask(latent["noise_mask"], noise.shape, device)
+
+         noise = noise.to(device)
+         latent_image = latent_image.to(device)
+
+         conds0 = \
+             {"positive": CFUI.sampler_helpers.convert_cond(positive),
+              "negative": CFUI.sampler_helpers.convert_cond(negative)}
+
+         conds = {}
+         for k in conds0:
+             conds[k] = list(map(lambda a: a.copy(), conds0[k]))
+
+         models, inference_memory = CFUI.sampler_helpers.get_additional_models(conds, model.model_dtype())
+
+         CFUI.model_management.load_models_gpu([model] + models, model.memory_required(noise.shape) + inference_memory)
+
+         sampler = CFUI.samplers.KSampler(model, steps=steps, device=device, sampler=sampler_name, scheduler=scheduler, denoise=1.0, model_options=model.model_options)
+
+         sigmas = sampler.sigmas.flip(0) + 0.0001
+
+         pbar = CFUI.utils.ProgressBar(steps)
+         def callback(step, x0, x, total_steps):
+             pbar.update_absolute(step + 1, total_steps)
+
+         samples = sampler.sample(noise, positive, negative, cfg=cfg, latent_image=latent_image, force_full_denoise=False, denoise_mask=noise_mask, sigmas=sigmas, start_step=0, last_step=end_at_step, callback=callback)
+         if normalize:
+             # technically doesn't normalize because unsampling is not guaranteed to end at a std given by the schedule
+             samples -= samples.mean()
+             samples /= samples.std()
+         samples = samples.cpu()
+
+         CFUI.sampler_helpers.cleanup_additional_models(models)
+
+         out = latent.copy()
+         out["samples"] = samples
+         return (out, )
+
+ NODE_CLASS_MAPPINGS = {
+     "BNK_NoisyLatentImage": NoisyLatentImage,
+     # "BNK_DuplicateBatchIndex": DuplicateBatchIndex,
+     "BNK_SlerpLatent": LatentSlerp,
+     "BNK_GetSigma": GetSigma,
+     "BNK_InjectNoise": InjectNoise,
+     "BNK_Unsampler": Unsampler,
+ }
+
+ NODE_DISPLAY_NAME_MAPPINGS = {
+     "BNK_NoisyLatentImage": "Noisy Latent Image",
+     # "BNK_DuplicateBatchIndex": "Duplicate Batch Index",
+     "BNK_SlerpLatent": "Slerp Latents",
+     "BNK_GetSigma": "Get Sigma",
+     "BNK_InjectNoise": "Inject Noise",
+     "BNK_Unsampler": "Unsampler",
+ }
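+
+ # Hypothetical usage sketch of the classes above (a minimal illustration, not a
+ # prescribed workflow): `model` is assumed to be a loaded CFUI model patcher,
+ # `latent` a LATENT dict, and "euler"/"normal" example sampler/scheduler names
+ # available in this CFUI build.
+ #
+ #   sigma = GetSigma().calc_sigma(model, "euler", "normal", 10000, 0, 10000)[0]
+ #   noise = NoisyLatentImage().create_noisy_latents("CPU", 0, 512, 512, 1)[0]
+ #   noised = InjectNoise().inject_noise(latent, float(sigma), noise=noise)[0]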