Update app.py
Browse files
app.py
CHANGED
@@ -1,41 +1,48 @@
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import random
|
4 |
-
from diffusers import DiffusionPipeline,
|
5 |
import torch
|
|
|
6 |
|
7 |
# Device configuration (explicitly set to CPU)
|
8 |
DEVICE = "cpu"
|
9 |
-
# Maximum Image Size (Defined at the top level)
|
10 |
-
MAX_IMAGE_SIZE = 1024
|
11 |
|
12 |
-
# Model Options (for
|
13 |
MODEL_OPTIONS = {
|
14 |
-
"
|
15 |
-
"
|
16 |
}
|
17 |
|
18 |
-
# Default to
|
19 |
-
DEFAULT_MODEL_ID = MODEL_OPTIONS["
|
20 |
|
21 |
-
|
22 |
-
|
23 |
-
|
24 |
-
# Load Model based on user selection
|
25 |
-
pipe = DiffusionPipeline.from_pretrained(
|
26 |
-
model_id, torch_dtype=torch.float32
|
27 |
-
)
|
28 |
|
29 |
-
|
30 |
-
if
|
|
|
|
|
|
|
|
|
|
|
31 |
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
|
|
|
|
|
|
32 |
|
33 |
-
|
|
|
|
|
|
|
|
|
34 |
|
35 |
generator = torch.Generator(device=DEVICE)
|
36 |
if not randomize_seed:
|
37 |
generator = generator.manual_seed(seed)
|
38 |
|
|
|
39 |
images = pipe(
|
40 |
prompt,
|
41 |
negative_prompt=negative_prompt,
|
@@ -47,45 +54,20 @@ def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height,
|
|
47 |
generator=generator,
|
48 |
).images
|
49 |
|
50 |
-
|
51 |
-
|
52 |
-
# Gradio Interface
|
53 |
-
with gr.Blocks(title="Генерація зображень за текстом", theme=gr.themes.Soft()) as demo:
|
54 |
-
gr.Markdown(
|
55 |
-
"""
|
56 |
-
## Text-to-Image Generation 🤖🎨
|
57 |
-
**Створіть дивовижні зображення зі своєї уяви!**
|
58 |
-
Введіть опис, налаштуйте параметри і дозвольте моделі створити для вас витвір мистецтва.
|
59 |
-
""")
|
60 |
|
61 |
-
|
62 |
-
prompt = gr.Textbox(label="Опис", placeholder="Напишіть ваш опис тут...")
|
63 |
-
negative_prompt = gr.Textbox(label="Негативний опис (необов'язково)")
|
64 |
|
65 |
-
with gr.Row():
|
66 |
-
seed = gr.Number(label="Початкове число", value=0)
|
67 |
-
randomize_seed = gr.Checkbox(label="Випадкове початкове число", value=True)
|
68 |
-
|
69 |
-
with gr.Row(): # Added this row for model selection
|
70 |
-
model_choice = gr.Radio(label="Виберіть модель", choices=list(MODEL_OPTIONS.keys()), value=DEFAULT_MODEL_ID)
|
71 |
|
72 |
-
|
73 |
-
width = gr.Slider(label="Ширина", minimum=256, maximum=MAX_IMAGE_SIZE, value=512, step=64)
|
74 |
-
height = gr.Slider(label="Висота", minimum=256, maximum=MAX_IMAGE_SIZE, value=512, step=64)
|
75 |
-
|
76 |
-
with gr.Accordion("Додаткові налаштування", open=False):
|
77 |
-
with gr.Row():
|
78 |
-
guidance_scale = gr.Slider(label="Рівень відповідності опису", minimum=0.0, maximum=20.0, value=7.5, step=0.1, info="Наскільки точно модель повинна слідувати опису.")
|
79 |
-
num_inference_steps = gr.Slider(label="Кількість кроків", minimum=10, maximum=100, value=50, step=5, info="Більше кроків може покращити якість, але займе більше часу.")
|
80 |
-
num_images = gr.Slider(label="Кількість зображень", minimum=1, maximum=4, value=1, step=1)
|
81 |
|
82 |
run_button = gr.Button("Згенерувати")
|
83 |
gallery = gr.Gallery(label="Згенеровані зображення")
|
|
|
84 |
|
85 |
run_button.click(
|
86 |
fn=generate_image,
|
87 |
inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice],
|
88 |
-
outputs=gallery,
|
89 |
)
|
90 |
-
|
91 |
-
demo.launch(debug=True)
|
|
|
1 |
import gradio as gr
|
2 |
import numpy as np
|
3 |
import random
|
4 |
+
from diffusers import DiffusionPipeline, LMSDiscreteScheduler
|
5 |
import torch
|
6 |
+
import time # Import time for measuring generation time
|
7 |
|
8 |
# Device configuration (explicitly set to CPU)
|
9 |
DEVICE = "cpu"
|
|
|
|
|
10 |
|
11 |
+
# Model Options (optimized for CPU)
|
12 |
MODEL_OPTIONS = {
|
13 |
+
"Medium Quality (Faster)": "stabilityai/stable-diffusion-2-base",
|
14 |
+
"Fastest (Draft Quality)": "hf-internal-testing/tiny-stable-diffusion-pipe",
|
15 |
}
|
16 |
|
17 |
+
# Default to fastest model
|
18 |
+
DEFAULT_MODEL_ID = MODEL_OPTIONS["Fastest (Draft Quality)"]
|
19 |
|
20 |
+
# Cache models to avoid reloading them for each generation
|
21 |
+
PIPELINES = {}
|
|
|
|
|
|
|
|
|
|
|
22 |
|
23 |
+
def load_pipeline(model_id):
|
24 |
+
if model_id in PIPELINES:
|
25 |
+
return PIPELINES[model_id]
|
26 |
+
else:
|
27 |
+
pipe = DiffusionPipeline.from_pretrained(
|
28 |
+
model_id, torch_dtype=torch.float32
|
29 |
+
)
|
30 |
pipe.scheduler = LMSDiscreteScheduler.from_config(pipe.scheduler.config)
|
31 |
+
pipe = pipe.to(DEVICE)
|
32 |
+
PIPELINES[model_id] = pipe
|
33 |
+
return pipe
|
34 |
|
35 |
+
def generate_image(prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice):
|
36 |
+
if not prompt:
|
37 |
+
raise gr.Error("Будь ласка, введіть опис для зображення.")
|
38 |
+
|
39 |
+
pipe = load_pipeline(MODEL_OPTIONS[model_choice])
|
40 |
|
41 |
generator = torch.Generator(device=DEVICE)
|
42 |
if not randomize_seed:
|
43 |
generator = generator.manual_seed(seed)
|
44 |
|
45 |
+
start_time = time.time() # Record start time
|
46 |
images = pipe(
|
47 |
prompt,
|
48 |
negative_prompt=negative_prompt,
|
|
|
54 |
generator=generator,
|
55 |
).images
|
56 |
|
57 |
+
end_time = time.time()
|
58 |
+
generation_time = end_time - start_time
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
59 |
|
60 |
+
return images, f"Час генерації: {generation_time:.2f} секунд" # Return images and generation time
|
|
|
|
|
61 |
|
|
|
|
|
|
|
|
|
|
|
|
|
62 |
|
63 |
+
# ... (Gradio UI remains largely the same, with an added status text output)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
64 |
|
65 |
run_button = gr.Button("Згенерувати")
|
66 |
gallery = gr.Gallery(label="Згенеровані зображення")
|
67 |
+
status_text = gr.Textbox(label="Статус")
|
68 |
|
69 |
run_button.click(
|
70 |
fn=generate_image,
|
71 |
inputs=[prompt, negative_prompt, seed, randomize_seed, width, height, guidance_scale, num_inference_steps, num_images, model_choice],
|
72 |
+
outputs=[gallery, status_text], # Output both the gallery and status text
|
73 |
)
|
|
|
|