Update app.py
app.py
CHANGED
@@ -72,11 +72,11 @@ def infer(prompts, negative_prompts, image):
     output=np.array(output, dtype=np.float32)
     return output
 
-with gr.Blocks(css=".gradio-container {background-color: #
+with gr.Blocks(css=".gradio-container {background-color: #f8d0ab};") as demo:
     gr.Markdown(
         """
         <h1 style="text-align: center;">
-
+        🐕‍🦺 Animal Pose Control Net 🐈‍⬛
         </h1>
         <h3 style="text-align: left;"> This is a demo of Animal Pose ControlNet, which is a model trained on runwayml/stable-diffusion-v1-5 with a new type of conditioning.</h3>
         <h3 style="text-align: left;"> While this is definitely a work in progress, you can still try it out by using the p5 sketch to create a keypoint image and using it as the conditioning image.</h3>
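For context, the snippet below is a minimal sketch of how a keypoint image like the one exported from the p5 sketch would be used as a conditioning image with a diffusers ControlNet pipeline. The checkpoint id "path/to/animal-pose-controlnet", the file names, and the prompts are placeholders (assumptions), not the Space's actual configuration; the demo's own infer() may load and call the model differently.

import torch
from PIL import Image
from diffusers import ControlNetModel, StableDiffusionControlNetPipeline

# Hypothetical repo id for the animal-pose ControlNet weights.
controlnet = ControlNetModel.from_pretrained(
    "path/to/animal-pose-controlnet", torch_dtype=torch.float16
)

# Base model matches the one named in the demo text.
pipe = StableDiffusionControlNetPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    torch_dtype=torch.float16,
).to("cuda")

# The keypoint image drawn in the p5 sketch serves as the conditioning image.
keypoints = Image.open("keypoints.png").convert("RGB")

image = pipe(
    prompt="a photo of a corgi in a park",
    negative_prompt="low quality, blurry",
    image=keypoints,
    num_inference_steps=20,
).images[0]
image.save("output.png")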