lets see
app.py
CHANGED
@@ -9,10 +9,8 @@ from diffusers.utils import numpy_to_pil
 from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline
 from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
 from previewer.modules import Previewer
-from compel import Compel
 os.environ['TOKENIZERS_PARALLELISM'] = 'false'
 
-
 DESCRIPTION = "# Würstchen"
 DESCRIPTION += "\n<p style=\"text-align: center\"><a href='https://huggingface.co/warp-ai/wuerstchen' target='_blank'>Würstchen</a> is a new fast and efficient high resolution text-to-image architecture and model</p>"
 if not torch.cuda.is_available():
@@ -53,7 +51,6 @@ if torch.cuda.is_available():
     else:
         previewer = None
         callback_prior = None
-    compel_proc = Compel(tokenizer=prior_pipeline.tokenizer, text_encoder=prior_pipeline.text_encoder)
 else:
     prior_pipeline = None
     decoder_pipeline = None
@@ -81,16 +78,12 @@ def generate(
 ) -> PIL.Image.Image:
     generator = torch.Generator().manual_seed(seed)
 
-    print("Running compel")
-    prompt_embeds = compel_proc(prompt)
-    negative_prompt_embeds = compel_proc(negative_prompt)
-
     prior_output = prior_pipeline(
-        prompt_embeds=prompt_embeds,
+        prompt=prompt,
         height=height,
         width=width,
         timesteps=DEFAULT_STAGE_C_TIMESTEPS,
-        negative_prompt_embeds=negative_prompt_embeds,
+        negative_prompt=negative_prompt,
         guidance_scale=prior_guidance_scale,
         num_images_per_prompt=num_images_per_prompt,
         generator=generator,
@@ -202,8 +195,8 @@ with gr.Blocks(css="style.css") as demo:
                 )
                 decoder_num_inference_steps = gr.Slider(
                     label="Decoder Inference Steps",
-                    minimum=
-                    maximum=
+                    minimum=4,
+                    maximum=12,
                     step=1,
                     value=12,
                 )
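What the commit changes: Compel prompt weighting is removed, and the raw prompt / negative_prompt strings are passed straight to the prior pipeline (the decoder-step slider is also pinned to the 4–12 range). For context, a minimal sketch of the resulting two-stage Würstchen call; the checkpoint ids, resolution, and guidance values follow the diffusers documentation rather than this Space's app.py:

import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline
from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS

device = "cuda" if torch.cuda.is_available() else "cpu"

# Checkpoint ids and guidance values follow the diffusers docs, not this Space.
prior_pipeline = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=torch.float16
).to(device)
decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained(
    "warp-ai/wuerstchen", torch_dtype=torch.float16
).to(device)

prompt = "Anthropomorphic cat dressed as a firefighter"
negative_prompt = ""
generator = torch.Generator(device=device).manual_seed(0)

# Stage C (prior): text conditioning -> image embeddings.
# After this commit the prompt goes in as a plain string.
prior_output = prior_pipeline(
    prompt=prompt,
    height=1024,
    width=1024,
    timesteps=DEFAULT_STAGE_C_TIMESTEPS,
    negative_prompt=negative_prompt,
    guidance_scale=4.0,
    num_images_per_prompt=1,
    generator=generator,
)

# Stages B + A (decoder): image embeddings -> final image.
images = decoder_pipeline(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    negative_prompt=negative_prompt,
    guidance_scale=0.0,
    output_type="pil",
).images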
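For reference, the removed path built embeddings with Compel, which parses weighting syntax (e.g. "ball++" to upweight a token span) into conditioning tensors. A rough sketch of that pattern, assuming a pipeline exposing CLIP tokenizer/text_encoder attributes as the old app.py did; the example prompts are illustrative:

from compel import Compel

# Build a weighting processor from the prior's CLIP tokenizer and text encoder.
compel_proc = Compel(
    tokenizer=prior_pipeline.tokenizer,
    text_encoder=prior_pipeline.text_encoder,
)

# Calling the processor turns weighted prompts into conditioning tensors that
# pipelines accept via the prompt_embeds / negative_prompt_embeds kwargs.
prompt_embeds = compel_proc("a cat playing with a ball++ in the forest")
negative_prompt_embeds = compel_proc("blurry, low quality")

Given the Space's "Runtime error" badge and the "lets see" commit message, dropping Compel in favor of plain strings looks like a debugging step to rule the embedding path out as the source of the crash.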