mattyamonaca committed
Commit 7eecfbf
Parent: bfea134
Files changed (2):
  1. app.py +31 -3
  2. sd_model.py +8 -43
app.py CHANGED
@@ -11,6 +11,7 @@ import os
 import numpy as np
 from PIL import Image
 import zipfile
+import spaces
 
 path = os.getcwd()
 output_dir = f"{path}/output"
@@ -19,6 +20,36 @@ cn_lineart_dir = f"{path}/controlnet/lineart"
 
 load_cn_model(cn_lineart_dir)
 load_cn_config(cn_lineart_dir)
+pipe = get_cn_pipeline()
+
+@spaces.GPU(duration=120)
+def generate(detectors, prompt, negative_prompt, reference_flg=False, reference_img=None):
+    default_pos = ""
+    default_neg = ""
+    prompt = default_pos + prompt
+    negative_prompt = default_neg + negative_prompt
+
+
+    if reference_flg==False:
+        image = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=detectors,
+            num_inference_steps=50,
+            controlnet_conditioning_scale=[1.0, 0.2],
+            ip_adapter_image=None,
+        ).images[0]
+    else:
+
+        image = pipe(
+            prompt=prompt,
+            negative_prompt=negative_prompt,
+            image=detectors,
+            num_inference_steps=50,
+            controlnet_conditioning_scale=[1.0, 0.2],
+            ip_adapter_image=reference_img,
+        ).images[0]
+    return image
 
 
 def zip_png_files(folder_path):
@@ -60,7 +91,6 @@ def resize_image(img, max_size=1024):
     return img
 
 
-
 class webui:
     def __init__(self):
        self.demo = gr.Blocks()
@@ -75,9 +105,7 @@ class webui:
            image[index] = [255, 255, 255, 255]
            input_image = cv2pil(image)
 
-           pipe = get_cn_pipeline(reference_flg)
            detectors = get_cn_detector(input_image.resize((1024, 1024), Image.ANTIALIAS))
-
 
            gen_image = generate(pipe, detectors, pos_prompt, neg_prompt, reference_flg, reference_img)
            color_img, unfinished = process(gen_image.resize((image.shape[1], image.shape[0]), Image.ANTIALIAS), org_line_image, alpha_th, thickness)
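
The app.py side of the commit adopts the standard Hugging Face ZeroGPU pattern: the pipeline is built once at module import (pipe = get_cn_pipeline()), and @spaces.GPU attaches a GPU only while the decorated generate() runs. Below is a minimal sketch of that pattern, assuming a ZeroGPU Space; the model id and function name are placeholders, not this repo's code:

import spaces  # import before anything initializes CUDA
import torch
from diffusers import DiffusionPipeline

# Built once at startup, with no GPU attached yet; under ZeroGPU, CUDA
# calls made here are deferred and replayed once a GPU is attached.
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/sdxl-turbo", torch_dtype=torch.float16
)
pipe.to("cuda")

@spaces.GPU(duration=120)  # hold the GPU for at most ~120 s per call
def infer(prompt: str):
    return pipe(prompt=prompt).images[0]

One caveat is visible in the diff itself: the new module-level generate() drops the pipe parameter, but the unchanged call site in the webui handler still passes pipe first, so it supplies six positional arguments to a five-parameter function and will raise a TypeError until the call site is updated.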
sd_model.py CHANGED
@@ -2,11 +2,11 @@ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel, UniPCM
 from diffusers import StableDiffusionXLControlNetPipeline, ControlNetModel, AutoencoderKL
 import torch
 import pickle as pkl
-import spaces
+
 
 device = "cuda"
 
-def get_cn_pipeline(reference_flg):
+def get_cn_pipeline():
     controlnets = [
         ControlNetModel.from_pretrained("./controlnet/lineart", torch_dtype=torch.float16, use_safetensors=True),
         ControlNetModel.from_pretrained("mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16)
@@ -16,6 +16,11 @@ def get_cn_pipeline(reference_flg):
     pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
         "cagliostrolab/animagine-xl-3.1", controlnet=controlnets, vae=vae, torch_dtype=torch.float16
     )
+    pipe.load_ip_adapter(
+        "ozzygt/sdxl-ip-adapter",
+        "",
+        weight_name="ip-adapter_sdxl_vit-h.safetensors"
+    )
 
     return pipe
 
@@ -30,47 +35,7 @@ def invert_image(img):
 
 
 def get_cn_detector(image):
-    #lineart_anime = LineartAnimeDetector.from_pretrained("lllyasviel/Annotators")
-    #canny = CannyDetector()
-    #lineart_anime_img = lineart_anime(image)
-    #canny_img = canny(image)
-    #canny_img = canny_img.resize((lineart_anime(image).width, lineart_anime(image).height))
-    re_image = invert_image(image)
-
-
+    re_image = invert_image(image)
     detectors = [re_image, image]
-    print(detectors)
     return detectors
 
-@spaces.GPU(duration=120)
-def generate(pipe, detectors, prompt, negative_prompt, reference_flg=False, reference_img=None):
-    pipe.to("cuda")
-    default_pos = ""
-    default_neg = ""
-    prompt = default_pos + prompt
-    negative_prompt = default_neg + negative_prompt
-
-
-    if reference_flg==False:
-        image = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            image=detectors,
-            num_inference_steps=50,
-            controlnet_conditioning_scale=[1.0, 0.2],
-        ).images[0]
-    else:
-        pipe.load_ip_adapter(
-            "ozzygt/sdxl-ip-adapter",
-            "",
-            weight_name="ip-adapter_sdxl_vit-h.safetensors")
-        image = pipe(
-            prompt=prompt,
-            negative_prompt=negative_prompt,
-            image=detectors,
-            num_inference_steps=50,
-            controlnet_conditioning_scale=[1.0, 0.2],
-            ip_adapter_image=reference_img,
-        ).images[0]
-
-    return image
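
On the sd_model.py side, generate() moves out of this module entirely, and the load_ip_adapter() call moves from the reference branch of the old generate() into get_cn_pipeline(), so the adapter weights are fetched once at startup instead of on every reference-guided call; the two branches of the new generate() in app.py then differ only in their ip_adapter_image argument. Below is a hedged sketch of that eager-loading shape, using diffusers APIs the commit relies on; the VAE checkpoint and the adapter scale are illustrative assumptions (the hunk context never shows where vae comes from, and the commit sets no scale), and the local ./controlnet/lineart ControlNet is omitted so the sketch stays self-contained:

import torch
from diffusers import AutoencoderKL, ControlNetModel, StableDiffusionXLControlNetPipeline

# Assumed fp16-safe VAE; the commit only shows `vae` being passed in.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)
controlnets = [
    ControlNetModel.from_pretrained(
        "mattyamonaca/controlnet_line2line_xl", torch_dtype=torch.float16
    ),
]
pipe = StableDiffusionXLControlNetPipeline.from_pretrained(
    "cagliostrolab/animagine-xl-3.1",
    controlnet=controlnets,
    vae=vae,
    torch_dtype=torch.float16,
)
# Load the IP-Adapter once at build time, with the same
# (repo id, subfolder, weight file) triple the commit uses.
pipe.load_ip_adapter(
    "ozzygt/sdxl-ip-adapter",
    "",  # empty subfolder, as in the commit
    weight_name="ip-adapter_sdxl_vit-h.safetensors",
)
pipe.set_ip_adapter_scale(0.6)  # assumed example value; not set in the commit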