Update app.py
app.py CHANGED
@@ -1,3 +1,4 @@
+import os
 import gradio as gr
 
 import torch
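The newly added import os is not used in the lines shown above; presumably it supports reading the Hugging Face access token (READ_TOKEN) from the environment elsewhere in app.py. A minimal sketch of that pattern, where the variable name HF_READ_TOKEN is only an assumption and does not appear in this diff:

    import os

    # Assumption: READ_TOKEN comes from a Space secret / environment variable;
    # the real variable name is not visible in this diff.
    READ_TOKEN = os.environ.get("HF_READ_TOKEN")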
@@ -12,7 +13,7 @@ model_id = "runwayml/stable-diffusion-v1-5"
 # model_id = "CompVis/stable-diffusion-v1-4"
 
 has_cuda = torch.cuda.is_available()
-
+
 if has_cuda:
     pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float16, revision="fp16", use_auth_token=READ_TOKEN)
     device = "cuda"
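Only the CUDA branch is visible in this hunk. For context, the CPU fallback in this kind of script typically mirrors it without fp16; the else branch below is a sketch under that assumption, not code taken from the diff:

    import torch
    from diffusers import StableDiffusionPipeline

    model_id = "runwayml/stable-diffusion-v1-5"
    has_cuda = torch.cuda.is_available()

    if has_cuda:
        # fp16 weights keep GPU memory usage low
        pipe = StableDiffusionPipeline.from_pretrained(
            model_id, torch_dtype=torch.float16, revision="fp16", use_auth_token=READ_TOKEN
        )
        device = "cuda"
    else:
        # assumed fallback: full-precision weights, since float16 is poorly supported on CPU
        pipe = StableDiffusionPipeline.from_pretrained(model_id, use_auth_token=READ_TOKEN)
        device = "cpu"

    pipe = pipe.to(device)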
@@ -70,9 +71,12 @@ with gr.Blocks() as demo:
     bt_make_image = gr.Button("Generate and image (takes about 10-15 minutes on CPU)", visible=False)
 
     image = gr.Image(label='Illustration for your story')
-    inference_steps = gr.Slider(5,
+    inference_steps = gr.Slider(5, 30, value=15, step=1, label="Num inference steps (more steps makes a better image but takes more time)")
 
     bt_make_text.click(fn=generate_story, inputs=prompt, outputs=[story, summary, bt_make_image])
     bt_make_image.click(fn=generate_image, inputs=[summary, inference_steps], outputs=image)
 
-
+if READ_TOKEN:
+    demo.launch()
+else:
+    demo.launch(share=True, debug=True)
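The new slider is wired into bt_make_image.click as a second input to generate_image, which is defined outside this diff. A plausible sketch of how the slider value would reach the pipeline (the function body is an assumption, not the actual implementation):

    # Assumed shape of generate_image (not shown in this diff): the slider value is
    # forwarded to the pipeline's num_inference_steps argument.
    def generate_image(summary, inference_steps):
        result = pipe(summary, num_inference_steps=int(inference_steps))
        return result.images[0]

The new launch logic makes startup depend on READ_TOKEN: when a token is configured, demo.launch() runs normally (as on a Space); otherwise demo.launch(share=True, debug=True) starts Gradio with a temporary public share link and debug mode, which is convenient when running the app outside Hugging Face.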