Update app.py
app.py (CHANGED)
@@ -97,6 +97,10 @@ def plex(qr_code_value, text, neg_prompt, modil, one, two, three):
     original.thumbnail((512, 512))
     cannyimage = load_image(original).resize((512,512))
     cannyimage = np.array(cannyimage)
+    pannyimage = load_image(original).resize((512,512))
+    pannyimage = np.array(pannyimage)
+    pannyimage = np.inverse(pannyimage)
+    pannyimage = Image.fromarray(pannyimage)
     low_threshold = 100
     high_threshold = 200
     cannyimage = cv2.Canny(cannyimage, low_threshold, high_threshold)
@@ -105,20 +109,14 @@
     cannyimage = Image.fromarray(cannyimage)
     images = [cannyimage]
     generator = torch.Generator(device="cpu").manual_seed(random.randint(1, 4836923))
-
-
-
-        images,
-        num_inference_steps=one,
-        generator=generator,
-        strength=two,
-        negative_prompt=[neg_prompt]*2,
-        controlnet_conditioning_scale=three,
-    )
+    imzge = pipe(prompt,original,num_inference_steps=one,generator=generator,strength=two,negative_prompt=neg_prompt,controlnet_conditioning_scale=three,).images[0]
+    apol.append(imzge)
+    image = pipe([prompt]*2,images,num_inference_steps=one,generator=generator,strength=two,negative_prompt=[neg_prompt]*2,controlnet_conditioning_scale=three,)
     for i, imge in enumerate(image["images"]):
         apol.append(imge)
     apol.append(original)
     apol.append(cannyimage)
+    apol.append(pannyimage)
     return apol
 
 iface = gr.Interface(fn=plex, inputs=[gr.Textbox(label="QR Code URL"),gr.Textbox(label="prompt"),gr.Textbox(label="neg prompt"),gr.Dropdown(choices=models, label="some sd models", value=models[0], type="value"), gr.Slider(label="num inference steps", minimum=1, step=1, maximum=5, value=5), gr.Slider(label="prompt strength", minimum=0.01, step=0.01, maximum=0.99, value=0.20), gr.Slider(label="controlnet scale", minimum=0.01, step=0.01, maximum=0.99, value=0.80)], outputs=gr.Gallery(label="out", columns=1))
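A note on the first hunk: NumPy has no np.inverse, so the new pannyimage block would raise an AttributeError the first time plex runs. Below is a minimal sketch of what that step appears to intend, a color-inverted 512x512 copy of the uploaded image. It assumes original is a PIL image, as in the surrounding code; make_inverted_copy is an illustrative name, not part of the Space.

import numpy as np
from PIL import Image

def make_inverted_copy(original: Image.Image) -> Image.Image:
    # Resize first, then invert. For uint8 pixel data, 255 - x is the
    # per-channel color inversion (equivalent to np.invert on uint8).
    arr = np.array(original.convert("RGB").resize((512, 512)), dtype=np.uint8)
    inverted = 255 - arr
    return Image.fromarray(inverted)

For plain RGB inputs, PIL.ImageOps.invert(original) would do the same in a single call.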
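The second hunk replaces the old multi-line pipe call with two calls: one run on the raw input image whose first output is appended to apol, and a batched run on the Canny edge image whose outputs are appended in the loop that follows. Here is a sketch of that flow with the keyword arguments written out, assuming pipe is a diffusers ControlNet-style img2img pipeline whose output object exposes an images list; the names prompt, original, images, generator, apol, neg_prompt and the one/two/three slider values come from the surrounding code, while single_out and batched_out are illustrative.

# Sketch only: mirrors the two calls added in this commit.
single_out = pipe(
    prompt=prompt,
    image=original,                       # raw (non-edge) input image
    num_inference_steps=one,
    generator=generator,
    strength=two,
    negative_prompt=neg_prompt,
    controlnet_conditioning_scale=three,
).images[0]
apol.append(single_out)

batched_out = pipe(
    prompt=[prompt] * 2,                  # two prompts -> two outputs
    image=images,                         # [cannyimage] edge input
    num_inference_steps=one,
    generator=generator,
    strength=two,
    negative_prompt=[neg_prompt] * 2,
    controlnet_conditioning_scale=three,
)
for imge in batched_out.images:           # same list as batched_out["images"]
    apol.append(imge)

With the extra appends at the end of the hunk, the list handed to gr.Gallery would then contain the single result, the batched results, the original upload, the Canny edge map, and the inverted copy.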