yeq6x committed on
Commit 098cbc8
1 Parent(s): 17e3bae

to(device)

Files changed (1)
  1. scripts/process_utils.py +19 -19
scripts/process_utils.py CHANGED
@@ -83,10 +83,11 @@ def initialize_sotai_model():
 
     # Create the ControlNet pipeline
     sotai_gen_pipe = StableDiffusionControlNetPipeline(
-        vae=sd_pipe.vae,
+        vae=sd_pipe.vae.to(device),
+        torch_dtype=torch_dtype,
         text_encoder=sd_pipe.text_encoder,
         tokenizer=sd_pipe.tokenizer,
-        unet=sd_pipe.unet,
+        unet=sd_pipe.unet.to(device),
         scheduler=sd_pipe.scheduler,
         safety_checker=sd_pipe.safety_checker,
         feature_extractor=sd_pipe.feature_extractor,
@@ -223,23 +224,22 @@ def generate_sotai_image(input_image: Image.Image, output_width: int, output_hei
     # Contents of EasyNegativeV2
     easy_negative_v2 = "(worst quality, low quality, normal quality:1.4), lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, jpeg artifacts, signature, watermark, username, blurry, artist name, (bad_prompt_version2:0.8)"
 
-    with torch.autocast('cuda'):
-        output = sotai_gen_pipe(
-            prompt,
-            image=[input_image, input_image],
-            negative_prompt=f"(wings:1.6), (clothes, garment, lighting, gray, missing limb, extra line, extra limb, extra arm, extra legs, hair, bangs, fringe, forelock, front hair, fill:1.4), (ink pool:1.6)",
-            # negative_prompt=f"{easy_negative_v2}, (wings:1.6), (clothes, garment, lighting, gray, missing limb, extra line, extra limb, extra arm, extra legs, hair, bangs, fringe, forelock, front hair, fill:1.4), (ink pool:1.6)",
-            num_inference_steps=20,
-            guidance_scale=8,
-            width=output_width,
-            height=output_height,
-            denoising_strength=0.13,
-            num_images_per_prompt=1,  # Equivalent to batch_size
-            guess_mode=[True, True],  # Equivalent to pixel_perfect
-            controlnet_conditioning_scale=[1.4, 1.3],  # Weight of each ControlNet
-            guidance_start=[0.0, 0.0],
-            guidance_end=[1.0, 1.0],
-        )
+    output = sotai_gen_pipe(
+        prompt,
+        image=[input_image, input_image],
+        negative_prompt=f"(wings:1.6), (clothes, garment, lighting, gray, missing limb, extra line, extra limb, extra arm, extra legs, hair, bangs, fringe, forelock, front hair, fill:1.4), (ink pool:1.6)",
+        # negative_prompt=f"{easy_negative_v2}, (wings:1.6), (clothes, garment, lighting, gray, missing limb, extra line, extra limb, extra arm, extra legs, hair, bangs, fringe, forelock, front hair, fill:1.4), (ink pool:1.6)",
+        num_inference_steps=20,
+        guidance_scale=8,
+        width=output_width,
+        height=output_height,
+        denoising_strength=0.13,
+        num_images_per_prompt=1,  # Equivalent to batch_size
+        guess_mode=[True, True],  # Equivalent to pixel_perfect
+        controlnet_conditioning_scale=[1.4, 1.3],  # Weight of each ControlNet
+        guidance_start=[0.0, 0.0],
+        guidance_end=[1.0, 1.0],
+    )
 
     generated_image = output.images[0]
 
     return generated_image
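
For reference, the same device placement is more commonly achieved in diffusers by moving the assembled pipeline once with .to(device) rather than moving individual components, and the removed torch.autocast('cuda') wrapper is typically unnecessary when the weights are already loaded in half precision. The sketch below only illustrates that general pattern under those assumptions; the checkpoint IDs are placeholders and none of it is taken from this repository's code.

import torch
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

device = "cuda" if torch.cuda.is_available() else "cpu"
torch_dtype = torch.float16 if device == "cuda" else torch.float32

# Placeholder checkpoints, not the ones used by scripts/process_utils.py
controlnet = ControlNetModel.from_pretrained(
    "lllyasviel/sd-controlnet-canny", torch_dtype=torch_dtype
)
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch_dtype,
)
pipe = pipe.to(device)  # moves unet, vae, and text_encoder together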