Spaces: Running on A10G
Commit • 7f11b82
Parent(s): 65fd06d
Update app.py
app.py CHANGED
```diff
@@ -11,7 +11,7 @@ import PIL.Image
 import torch
 from diffusers import DiffusionPipeline
 
-DESCRIPTION = '
+DESCRIPTION = 'This space is an API service meant to be used by VideoChain and VideoQuest.\nWant to use this space for yourself? Please use the original code: [https://huggingface.co/spaces/hysts/SD-XL](https://huggingface.co/spaces/hysts/SD-XL)'
 if not torch.cuda.is_available():
     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
 
```
```diff
@@ -21,6 +21,7 @@ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
 MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
 USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
 ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
+SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
```
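An editorial caveat on the new `SECRET_TOKEN` line, not a behavior change in the commit: if the environment variable is never configured (on Spaces it would normally come from a repository secret), the gate ends up accepting the literal string `'default_secret'`. A minimal sketch of a stricter variant that fails fast instead; the hard failure is a suggestion, not the commit's behavior:

```python
import os

# The commit's pattern: fall back to a known default when the secret is unset.
SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')

# Stricter alternative (suggestion only): refuse to start when no real secret
# is configured, so the fallback string can never be used as a valid token.
if os.environ.get('SECRET_TOKEN') is None:
    raise RuntimeError('SECRET_TOKEN is not set; refusing to start.')
```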
```diff
@@ -50,7 +51,12 @@ else:
     pipe = None
     refiner = None
 
-
+def check_secret_token(token: str) -> str:
+    """Raises an error if the token does not match the secret token."""
+    if token != SECRET_TOKEN:
+        raise ValueError("Invalid secret token!")
+    return token
+
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
```
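Two notes on the new gate. Raising inside a Gradio event handler surfaces as an error in the UI and aborts any events chained after it, which is what lets this function act as a guard. Separately, a hardening suggestion that is not part of the commit: a plain `!=` comparison of secrets can in principle leak timing information, and the standard library's `hmac.compare_digest` compares in constant time. A sketch of the same function with that single swap:

```python
import hmac
import os

SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')

def check_secret_token(token: str) -> str:
    """Raises an error if the token does not match the secret token."""
    # compare_digest runs in constant time, so the comparison itself does not
    # reveal how many leading characters of the token matched.
    if not hmac.compare_digest(token, SECRET_TOKEN):
        raise ValueError('Invalid secret token!')
    return token
```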
```diff
@@ -121,11 +127,13 @@ examples = [
 
 with gr.Blocks(css='style.css') as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button',
-                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
     with gr.Box():
         with gr.Row():
+            secret_token = gr.Text(
+                label='Secret Token',
+                max_lines=1,
+                placeholder='Enter your secret token',
+            )
             prompt = gr.Text(
                 label='Prompt',
                 show_label=False,
```
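A small UI note, again a suggestion rather than what the commit does: the token is typed into an ordinary visible textbox, and Gradio's `Textbox` (for which `gr.Text` is an alias) accepts `type='password'` to mask the input. The same component as in the diff, with that one addition:

```python
import gradio as gr

secret_token = gr.Text(
    label='Secret Token',
    max_lines=1,
    placeholder='Enter your secret token',
    type='password',  # masks the value in the rendered UI; not in the commit
)
```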
```diff
@@ -261,8 +269,15 @@ with gr.Blocks(css='style.css') as demo:
         num_inference_steps_base,
         num_inference_steps_refiner,
         apply_refiner,
+        secret_token,
     ]
     prompt.submit(
+        fn=check_secret_token,
+        inputs=[secret_token],
+        outputs=gr.outputs.Void(),
+        queue=False,
+        api_name=False,
+    ).then(
         fn=randomize_seed_fn,
         inputs=[seed, randomize_seed],
         outputs=seed,
```
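This wiring is the heart of the change: `prompt.submit(...)` now runs the token check first (`queue=False` keeps it out of the queue, `api_name=False` keeps it off the published API), and the `.then(...)` continuation only fires when the check did not raise. A toy reduction of the same pattern, assuming the Gradio 3.x-era API used in the diff; every name below is hypothetical:

```python
import gradio as gr

SECRET = 'letmein'  # stand-in for the SECRET_TOKEN environment variable

def gate(token: str) -> None:
    # Raising here aborts the event chain, so work() below never runs.
    if token != SECRET:
        raise ValueError('Invalid secret token!')

def work(text: str) -> str:
    return text.upper()  # stand-in for the diffusion pipeline call

with gr.Blocks() as demo:
    token = gr.Text(label='Secret Token')
    prompt = gr.Text(label='Prompt')
    result = gr.Text(label='Result')
    # The gate runs first; .then() only fires if gate() did not raise.
    prompt.submit(fn=gate, inputs=[token], queue=False).then(
        fn=work, inputs=[prompt], outputs=result)

demo.queue(max_size=2).launch()
```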
```diff
@@ -298,4 +313,4 @@ with gr.Blocks(css='style.css') as demo:
         outputs=result,
         api_name=False,
     )
-    demo.queue(max_size=
+    demo.queue(max_size=2).launch()
```
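Net effect: the public SD-XL demo becomes a token-gated backend for VideoChain and VideoQuest, the duplicate-space button is gone, every generation request must carry the secret token as its final input, and `demo.queue(max_size=2)` caps the pending queue at two requests. Because the events are registered with `api_name=False`, callers cannot rely on named routes; with the `gradio_client` package one would first inspect the unnamed endpoints to find the right `fn_index` and parameter order. A hedged sketch; the URL is hypothetical:

```python
from gradio_client import Client

# Hypothetical deployment address; replace with the actual Space URL.
client = Client('https://your-space.hf.space')

# api_name=False leaves the endpoints unnamed, so list every endpoint with its
# fn_index and expected parameters before calling predict().
client.view_api(all_endpoints=True)
```

From that listing, the generation event would be invoked as `client.predict(..., fn_index=<index>)` with the full inputs list shown in the diff and the secret token as the last argument, which is what `check_secret_token` validates.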