jbilcke-hf committed
Commit 0bccba3
1 Parent(s): e7442ec

Update app.py

Files changed (1):
  1. app.py +19 -4
app.py CHANGED
@@ -11,7 +11,7 @@ import PIL.Image
 import torch
 from diffusers import DiffusionPipeline
 
-DESCRIPTION = '# SD-XL'
+DESCRIPTION = 'This space is an API service meant to be used by VideoChain and VideoQuest.\nWant to use this space for yourself? Please use the original code: [https://huggingface.co/spaces/hysts/SD-XL](https://huggingface.co/spaces/hysts/SD-XL)'
 if not torch.cuda.is_available():
     DESCRIPTION += '\n<p>Running on CPU 🥶 This demo does not work on CPU.</p>'
 
@@ -21,6 +21,7 @@ CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv(
 MAX_IMAGE_SIZE = int(os.getenv('MAX_IMAGE_SIZE', '1024'))
 USE_TORCH_COMPILE = os.getenv('USE_TORCH_COMPILE') == '1'
 ENABLE_CPU_OFFLOAD = os.getenv('ENABLE_CPU_OFFLOAD') == '1'
+SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')
 
 device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
 if torch.cuda.is_available():
@@ -54,6 +55,11 @@ else:
     pipe = None
     refiner = None
 
+def check_secret_token(token: str) -> str:
+    """Raises an error if the token does not match the secret token."""
+    if token != SECRET_TOKEN:
+        raise ValueError("Invalid secret token!")
+    return token
 
 def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     if randomize_seed:
@@ -125,11 +131,13 @@ examples = [
 
 with gr.Blocks(css='style.css') as demo:
     gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(value='Duplicate Space for private use',
-                       elem_id='duplicate-button',
-                       visible=os.getenv('SHOW_DUPLICATE_BUTTON') == '1')
     with gr.Box():
         with gr.Row():
+            secret_token = gr.Text(
+                label='Secret Token',
+                max_lines=1,
+                placeholder='Enter your secret token',
+            )
             prompt = gr.Text(
                 label='Prompt',
                 show_label=False,
@@ -265,8 +273,15 @@ with gr.Blocks(css='style.css') as demo:
             num_inference_steps_base,
             num_inference_steps_refiner,
             apply_refiner,
+            secret_token,
         ]
         prompt.submit(
+            fn=check_secret_token,
+            inputs=[secret_token],
+            outputs=gr.outputs.Void(),
+            queue=False,
+            api_name=False,
+        ).then(
            fn=randomize_seed_fn,
            inputs=[seed, randomize_seed],
            outputs=seed,
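
Below is a minimal, self-contained sketch of the gating pattern this commit introduces: a shared-secret check chained in front of the generation event, so callers must present the token before the pipeline runs. It is an illustration rather than the Space's actual code: `generate` is a hypothetical stand-in for the SD-XL pipeline call, `outputs=None` replaces the diff's `gr.outputs.Void()` (which does not appear in recent Gradio releases), and `.success()` is used instead of `.then()` on the assumption that generation should be skipped when the check raises, since `.then()` fires regardless of whether the previous step succeeded.

import os

import gradio as gr

# Same environment-driven secret as in the diff; set SECRET_TOKEN so the
# 'default_secret' fallback never ships to production.
SECRET_TOKEN = os.getenv('SECRET_TOKEN', 'default_secret')


def check_secret_token(token: str) -> str:
    # Raising aborts this event; the chained .success() step below never
    # runs, so callers without the secret cannot trigger generation.
    if token != SECRET_TOKEN:
        raise ValueError('Invalid secret token!')
    return token


def generate(prompt: str) -> str:
    # Hypothetical stand-in for the real SD-XL pipeline call.
    return f'(would run SD-XL for: {prompt})'


with gr.Blocks() as demo:
    secret_token = gr.Text(label='Secret Token', max_lines=1,
                           placeholder='Enter your secret token')
    prompt = gr.Text(label='Prompt')
    result = gr.Text(label='Result')
    prompt.submit(
        fn=check_secret_token,
        inputs=[secret_token],
        outputs=None,       # the check returns nothing the UI needs
        queue=False,
        api_name=False,     # keep the check itself off the public API
    ).success(              # unlike .then(), stops if the check raised
        fn=generate,
        inputs=[prompt],
        outputs=result,
    )

if __name__ == '__main__':
    demo.queue().launch()

On a Hugging Face Space, SECRET_TOKEN would be configured as a repository secret, which the runtime exposes to the app as an environment variable, so the default value is only ever seen in local development.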